X86ISelLowering.cpp revision 87c8935fd55f3177e787e192b5ed4686b6073c61
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8,  X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer multiply, multiply-high, divide, and remainder are
  // lowered to use operations that produce two results, to match the
  // available instructions. This exposes the two-result form to trivial
  // CSE, which is able to combine x/y and x%y into a single instruction,
  // for example. The single-result multiply instructions are introduced
  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part
  // is not needed.
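  // For instance (illustrative IR, not taken from this file):
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both legalize to ISD::SDIVREM nodes that CSE merges, so a single IDIV
  // yields both the quotient and the remainder.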
  setOperationAction(ISD::MUL,   MVT::i8,  Expand);
  setOperationAction(ISD::MULHS, MVT::i8,  Expand);
  setOperationAction(ISD::MULHU, MVT::i8,  Expand);
  setOperationAction(ISD::SDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::UDIV,  MVT::i8,  Expand);
  setOperationAction(ISD::SREM,  MVT::i8,  Expand);
  setOperationAction(ISD::UREM,  MVT::i8,  Expand);
  setOperationAction(ISD::MUL,   MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV,  MVT::i16, Expand);
  setOperationAction(ISD::UDIV,  MVT::i16, Expand);
  setOperationAction(ISD::SREM,  MVT::i16, Expand);
  setOperationAction(ISD::UREM,  MVT::i16, Expand);
  setOperationAction(ISD::MUL,   MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
  setOperationAction(ISD::SREM,  MVT::i32, Expand);
  setOperationAction(ISD::UREM,  MVT::i32, Expand);
  setOperationAction(ISD::MUL,   MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV,  MVT::i64, Expand);
  setOperationAction(ISD::UDIV,  MVT::i64, Expand);
  setOperationAction(ISD::SREM,  MVT::i64, Expand);
  setOperationAction(ISD::UREM,  MVT::i64, Expand);

  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
  setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,  Expand);
  setOperationAction(ISD::FP_ROUND_INREG,    MVT::f32, Expand);
  setOperationAction(ISD::FREM,              MVT::f64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i8,  Expand);
  setOperationAction(ISD::CTTZ,  MVT::i8,  Expand);
  setOperationAction(ISD::CTLZ,  MVT::i8,  Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i16, Expand);
  setOperationAction(ISD::CTLZ,  MVT::i16, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTLZ,  MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ,  MVT::i64, Expand);
    setOperationAction(ISD::CTLZ,  MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP,            MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
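  // (A sketch of the scheme: a SELECT is custom-lowered to an X86ISD::CMP
  // feeding an X86ISD::CMOV node that carries an explicit condition code;
  // the lowering routines themselves are not shown in this excerpt.)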
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC,  MVT::i8,  Custom);
  setOperationAction(ISD::SETCC,  MVT::i16, Custom);
  setOperationAction(ISD::SETCC,  MVT::i32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f64, Custom);
  setOperationAction(ISD::SETCC,  MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC,  MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool,    MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,       MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress,   MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol,  MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
    setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  // Use the default ISD::LOCATION expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG,   MVT::Other, Expand);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Conversions to long double (in X87) go through memory.
    setConvertAction(MVT::f32, MVT::f80, Expand);
    setConvertAction(MVT::f64, MVT::f80, Expand);

    // Conversions from long double (in X87) go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE->x87 conversions go through memory.
    setConvertAction(MVT::f32, MVT::f64, Expand);
    setConvertAction(MVT::f32, MVT::f80, Expand);

    // x87->SSE truncations need to go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f64, MVT::f32, Expand);
    // And x87->x87 truncations also.
    setConvertAction(MVT::f80, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations need to go through memory.
    setConvertAction(MVT::f80, MVT::f32, Expand);
    setConvertAction(MVT::f64, MVT::f32, Expand);
    setConvertAction(MVT::f80, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,      MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN,  MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
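  // (Marking FPOW as Expand makes the legalizer emit a runtime library call,
  // presumably the powf/pow/powl family for f32/f64/f80 respectively.)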
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM,   (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ,  (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ,  (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL,   MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,  MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
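  // (Reading the thresholds: a constant-length memset/memcpy/memmove that
  // fits in at most 16 stores is lowered inline as a store sequence instead
  // of a library call; an illustrative interpretation, the exact store widths
  // are chosen by the lowering code.)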
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands() - 1; ++i) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't
    // really a register.
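    // (ST0 only becomes a directly usable register once the FP stackifier
    // pass has run; until then the return value is materialized through the
    // X86ISD::FP_SET_RESULT node built below.)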
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.
    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
      SDOperand MemLoc;

      // If this is a load into a scalarsse value, don't store the loaded value
      // back to the stack, only to reload it: just replace the scalar-sse load.
      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain  = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into top of stack.
        unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
      SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
      Chain = Value.getValue(1);
    }

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. It returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.
    SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain  = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
    // an XMM register.
    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      SDOperand Ops[] = {
        Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
      };
      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
      RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }
    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is standard for many Windows API routines.
//  It differs from the C calling convention only a little: the callee cleans
//  up the stack instead of the caller, and symbols are decorated in some
//  fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation, LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to
/// the MachineFunction as a live-in value. It also creates a corresponding
/// virtual register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

// Align stack arguments according to the platform alignment needed for tail
// calls.
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG);

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset());
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());

  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();

  if (Flags & ISD::ParamFlags::ByVal)
    return FIN;
  else
    return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
}

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                               bool isStdCall) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  // Assign locations to all of the incoming arguments.
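  // (CCState runs each formal argument through the selected calling-convention
  // table; every argument ends up in ArgLocs as either a register assignment
  // or a stack-slot offset, which the loop below turns into DAG nodes.)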
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg,
                 getTargetMachine(), ArgLocs);
  // Check for possible tail call calling convention.
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_TailCall);
  else
    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  ArgValues.push_back(Root);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);

  // Tail call calling convention (CallingConv::Fast) does not support varargs.
  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "CallingConv::Fast does not support varargs.");

  if (isStdCall && !isVarArg &&
      ((CC == CallingConv::Fast && PerformTailCallOpt) ||
       CC != CallingConv::Fast)) {
    BytesToPopOnReturn  = StackSize;  // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn  = 0;          // Callee pops nothing.

    // If this is an sret function, the return should pop the hidden pointer.
    if (NumArgs &&
        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
         ISD::ParamFlags::StructReturn))
      BytesToPopOnReturn = 4;

    BytesCallerReserves = StackSize;
  }

  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  // If the first argument is an sret pointer, remember it.
  bool isSRet = NumOps &&
                (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
                 ISD::ParamFlags::StructReturn);

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall ||
      (CC == CallingConv::Fast && PerformTailCallOpt)) {
    if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
    else
      NumBytesForCalleeToPush = NumBytes;
    assert(!(isVarArg && CC == CallingConv::Fast) &&
           "CallingConv::Fast does not support varargs.");
  } else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                   FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
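//
// For example, 16 bytes of arguments (8n) are padded to 20 (8n+4) by the
// lowering below, so that once the 4-byte return address is pushed the total
// adjustment is 24 and the stack stays 8-byte aligned.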
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
                 getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the incoming stack-argument area takes 8n+4 bytes, so that
    // the arguments are aligned both before and after the return address
    // has been pushed.
    if ((StackSize & 7) == 0)
      StackSize += 4;
  }

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
  BytesToPopOnReturn = StackSize;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                           ISD::ParamFlags::ByValAlignOffs);

    unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                    ISD::ParamFlags::ByValSizeOffs;

    SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
    SDOperand SizeNode  = DAG.getConstant(Size, MVT::i32);

    return DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, PtrOff, Arg, SizeNode,
                       AlignNode);
  } else {
    return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
  }
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the outgoing argument area takes 8n+4 bytes, so that the
    // arguments are aligned both before and after the return address has
    // been pushed.
    if ((NumBytes & 7) == 0)
      NumBytes += 4;
  }

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
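    // (For instance, an i8 argument assigned to ECX arrives with loc info
    // SExt/ZExt/AExt and is widened here to the 32-bit location type before
    // the copy-to-reg is emitted.)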
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  assert(!isTailCall && "no tail call here");
  Chain = DAG.getNode(X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}

//===----------------------------------------------------------------------===//
// Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

// Like the stdcall convention, the callee cleans up the stack arguments,
// except that ECX is reserved for storing the tail-called function's address.
// Only 2 registers are free for argument passing (inreg). Tail call
// optimization is performed provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// * elf/pic is disabled OR
// * elf/pic is enabled + callee is in the module + callee has
//   protected or hidden visibility
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - Darwin's
// dyld, for example.)
// If the tail-called callee has more arguments than the caller, the caller
// needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after
// the original RETADDR, but before the saved frame pointer or the spilled
// registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
// stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Align the stack size so that, together with
/// the pushed return address slot, the platform alignment holds; e.g. 16n + 12
/// for a 16-byte alignment requirement with 4-byte slots (a 20-byte argument
/// area grows to 28 bytes, since 28 + 4 is 16-byte aligned).
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // The misalignment fits below StackAlignment - SlotSize, so just add
      // the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out the lower bits, then add the stack alignment once plus the
      // remaining StackAlignment - SlotSize bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment - SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A function is eligible if
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the function CALL is immediately followed by a RET.
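/// Concretely: either the RET's sole operand is the CALL's chain result (a
/// void return), or the RET consumes the CALL's chain together with the
/// CALL's first result value; both shapes are matched below.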
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  bool IsEligible = false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  if ((Ret.getNumOperands() == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val, 1) ||
        Ret.getOperand(0) == SDOperand(Call.Val, 0))) ||
      (Ret.getOperand(0) == SDOperand(Call.Val, Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val, 0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On elf/pic %ebx needs to be livein.
      if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
          Subtarget->isPICStyleGOT()) {
        // Can only do local tail calls with PIC.
        GlobalValue *GV = 0;
        GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
        if (G != 0 &&
            (GV = G->getGlobal()) &&
            (GV->hasHiddenVisibility() || GV->hasProtectedVisibility()))
          IsEligible = true;
      } else {
        IsEligible = true;
      }
    }
  }
  return IsEligible;
}

SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op,
                                                 SelectionDAG &DAG,
                                                 unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  bool is64Bit = Subtarget->is64Bit();

  assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls.");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (is64Bit)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);

  // Lower arguments at fp - stackoffset + fpdiff.
  MachineFunction &MF = DAG.getMachineFunction();

  unsigned NumBytesToBePushed =
    GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG);

  unsigned NumBytesCallerPushed =
    MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
  int FPDiff = NumBytesCallerPushed - NumBytesToBePushed;

  // Record the delta by which the return address stack slot moves, but only
  // if the new delta is smaller (more negative) than the previous one.
  if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
    MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);

  // Adjust the ret address stack slot.
  if (FPDiff) {
    MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32;
    SDOperand RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
    RetAddrFrIdx =
      DAG.getLoad(VT, DAG.getEntryNode(), RetAddrFrIdx, NULL, 0);
    // Emit a store of the saved ret value to the new location.
    int SlotSize = is64Bit ? 8 : 4;
    int NewReturnAddrFI =
      MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
    SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
    Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytesToBePushed,
                                                      getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  SmallVector<SDOperand, 8> MemOpChains2;
  SDOperand FramePtr, StackPtr;
  SDOperand PtrOff;
  SDOperand FIN;
  int FI = 0;

  // Walk the register/memloc assignments, inserting copies/loads. Lower the
  // arguments first to the stack slots they would occupy in a normal function
  // call.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  InFlag = SDOperand();
  // Copy the arguments from their temporary stack slots to the stack slots of
  // the tail-called function. This is needed because lowering the arguments
  // directly to their final stack slots could make them overwrite each other.
  // TODO: To make this more efficient (sometimes saving a store/load) we could
  // analyse the arguments and emit this store/load/store sequence only for
  // arguments which would be overwritten otherwise.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (!VA.isRegLoc()) {
      SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
      unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();

      // Get source stack slot.
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset()+FPDiff;
      uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
      FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
      FIN = DAG.getFrameIndex(FI, MVT::i32);
      if (Flags & ISD::ParamFlags::ByVal) {
        // Copy relative to framepointer.
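        // The byval size and alignment were packed into the parameter-flags
        // word when the call node was built; e.g. a 4-byte-aligned, 8-byte
        // struct decodes to Align == 4 and Size == 8 below.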
        unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
                               ISD::ParamFlags::ByValAlignOffs);

        unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                        ISD::ParamFlags::ByValSizeOffs;

        SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
        SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
        // Copy relative to framepointer.
        MemOpChains2.push_back(DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, FIN,
                                           PtrOff, SizeNode, AlignNode));
      } else {
        SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL, 0);
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
      }
    }
  }

  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains2[0], MemOpChains2.size());

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls made via the PLT. That does not work with tail calls, since EBX is
  // not restored correctly by the tail caller.
  // TODO: true at least for x86 - verify for x86-64.

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  else {
    assert(Callee.getOpcode() == ISD::LOAD &&
           "Function destination must be loaded into virtual register");
    unsigned CalleeReg = is64Bit ? X86::R9 : X86::ECX;

    Chain = DAG.getCopyToReg(Chain,
                             DAG.getRegister(CalleeReg, getPointerTy()),
                             Callee, InFlag);
    Callee = DAG.getRegister(CalleeReg, getPointerTy());
    // Add the register as live out.
    DAG.getMachineFunction().addLiveOut(CalleeReg);
  }

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;

  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a chain & a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  if (InFlag.Val)
    Ops.push_back(InFlag);
  assert(InFlag.Val &&
         "Flag must be set.
Depend on flag being set in LowerRET"); 1694 Chain = DAG.getNode(X86ISD::TAILCALL, 1695 Op.Val->getVTList(), &Ops[0], Ops.size()); 1696 1697 return SDOperand(Chain.Val, Op.ResNo); 1698} 1699 1700//===----------------------------------------------------------------------===// 1701// X86-64 C Calling Convention implementation 1702//===----------------------------------------------------------------------===// 1703 1704SDOperand 1705X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) { 1706 MachineFunction &MF = DAG.getMachineFunction(); 1707 MachineFrameInfo *MFI = MF.getFrameInfo(); 1708 SDOperand Root = Op.getOperand(0); 1709 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1710 unsigned CC= MF.getFunction()->getCallingConv(); 1711 1712 static const unsigned GPR64ArgRegs[] = { 1713 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1714 }; 1715 static const unsigned XMMArgRegs[] = { 1716 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1717 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1718 }; 1719 1720 1721 // Assign locations to all of the incoming arguments. 1722 SmallVector<CCValAssign, 16> ArgLocs; 1723 CCState CCInfo(CC, isVarArg, 1724 getTargetMachine(), ArgLocs); 1725 if (CC == CallingConv::Fast && PerformTailCallOpt) 1726 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall); 1727 else 1728 CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C); 1729 1730 SmallVector<SDOperand, 8> ArgValues; 1731 unsigned LastVal = ~0U; 1732 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1733 CCValAssign &VA = ArgLocs[i]; 1734 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1735 // places. 1736 assert(VA.getValNo() != LastVal && 1737 "Don't support value assigned to multiple locs yet"); 1738 LastVal = VA.getValNo(); 1739 1740 if (VA.isRegLoc()) { 1741 MVT::ValueType RegVT = VA.getLocVT(); 1742 TargetRegisterClass *RC; 1743 if (RegVT == MVT::i32) 1744 RC = X86::GR32RegisterClass; 1745 else if (RegVT == MVT::i64) 1746 RC = X86::GR64RegisterClass; 1747 else if (RegVT == MVT::f32) 1748 RC = X86::FR32RegisterClass; 1749 else if (RegVT == MVT::f64) 1750 RC = X86::FR64RegisterClass; 1751 else { 1752 assert(MVT::isVector(RegVT)); 1753 if (MVT::getSizeInBits(RegVT) == 64) { 1754 RC = X86::GR64RegisterClass; // MMX values are passed in GPRs. 1755 RegVT = MVT::i64; 1756 } else 1757 RC = X86::VR128RegisterClass; 1758 } 1759 1760 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); 1761 SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); 1762 1763 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1764 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1765 // right size. 1766 if (VA.getLocInfo() == CCValAssign::SExt) 1767 ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue, 1768 DAG.getValueType(VA.getValVT())); 1769 else if (VA.getLocInfo() == CCValAssign::ZExt) 1770 ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue, 1771 DAG.getValueType(VA.getValVT())); 1772 1773 if (VA.getLocInfo() != CCValAssign::Full) 1774 ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue); 1775 1776 // Handle MMX values passed in GPRs. 
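      // (e.g. a v2i32 argument arrives in RDI as a plain i64; the
      // BIT_CONVERT below reinterprets it as the expected MMX vector type.)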
1777 if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass && 1778 MVT::getSizeInBits(RegVT) == 64) 1779 ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue); 1780 1781 ArgValues.push_back(ArgValue); 1782 } else { 1783 assert(VA.isMemLoc()); 1784 ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i)); 1785 } 1786 } 1787 1788 unsigned StackSize = CCInfo.getNextStackOffset(); 1789 if (CC==CallingConv::Fast) 1790 StackSize =GetAlignedArgumentStackSize(StackSize, DAG); 1791 1792 // If the function takes variable number of arguments, make a frame index for 1793 // the start of the first vararg value... for expansion of llvm.va_start. 1794 if (isVarArg) { 1795 assert(CC!=CallingConv::Fast 1796 && "Var arg not supported with calling convention fastcc"); 1797 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6); 1798 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1799 1800 // For X86-64, if there are vararg parameters that are passed via 1801 // registers, then we must store them to their spots on the stack so they 1802 // may be loaded by deferencing the result of va_next. 1803 VarArgsGPOffset = NumIntRegs * 8; 1804 VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16; 1805 VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize); 1806 RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16); 1807 1808 // Store the integer parameter registers. 1809 SmallVector<SDOperand, 8> MemOps; 1810 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 1811 SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1812 DAG.getConstant(VarArgsGPOffset, getPointerTy())); 1813 for (; NumIntRegs != 6; ++NumIntRegs) { 1814 unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], 1815 X86::GR64RegisterClass); 1816 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1817 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1818 MemOps.push_back(Store); 1819 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1820 DAG.getConstant(8, getPointerTy())); 1821 } 1822 1823 // Now store the XMM (fp + vector) parameter registers. 1824 FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, 1825 DAG.getConstant(VarArgsFPOffset, getPointerTy())); 1826 for (; NumXMMRegs != 8; ++NumXMMRegs) { 1827 unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], 1828 X86::VR128RegisterClass); 1829 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); 1830 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1831 MemOps.push_back(Store); 1832 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 1833 DAG.getConstant(16, getPointerTy())); 1834 } 1835 if (!MemOps.empty()) 1836 Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 1837 &MemOps[0], MemOps.size()); 1838 } 1839 1840 ArgValues.push_back(Root); 1841 // Tail call convention (fastcc) needs callee pop. 1842 if (CC == CallingConv::Fast && PerformTailCallOpt) { 1843 BytesToPopOnReturn = StackSize; // Callee pops everything. 1844 BytesCallerReserves = 0; 1845 } else { 1846 BytesToPopOnReturn = 0; // Callee pops nothing. 1847 BytesCallerReserves = StackSize; 1848 } 1849 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1850 FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn); 1851 1852 // Return the new list of results. 
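  // The formal argument values plus the updated chain (Root) are merged into
  // a single MERGE_VALUES node; users select individual results by result
  // number.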
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
                                             Arg));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    assert(CC != CallingConv::Fast &&
           "Var args not supported with calling convention fastcc");

    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...)
    // in the declaration) %al is used as a hidden argument to specify the
    // number of SSE registers used. The contents of %al do not need to match
    // exactly the number of registers, but must be an upper bound on the
    // number of SSE registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
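    // (e.g. a call such as printf("%f %f\n", x, y) passes two doubles in XMM
    // registers, so AL is set to 2; any upper bound up to 8 would also
    // satisfy the ABI.)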
1942 static const unsigned XMMArgRegs[] = { 1943 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1944 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1945 }; 1946 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1947 1948 Chain = DAG.getCopyToReg(Chain, X86::AL, 1949 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1950 InFlag = Chain.getValue(1); 1951 } 1952 1953 // If the callee is a GlobalAddress node (quite common, every direct call is) 1954 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1955 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1956 // We should use extra load for direct calls to dllimported functions in 1957 // non-JIT mode. 1958 if (getTargetMachine().getCodeModel() != CodeModel::Large 1959 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1960 getTargetMachine(), true)) 1961 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1962 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1963 if (getTargetMachine().getCodeModel() != CodeModel::Large) 1964 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1965 1966 // Returns a chain & a flag for retval copy to use. 1967 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1968 SmallVector<SDOperand, 8> Ops; 1969 Ops.push_back(Chain); 1970 Ops.push_back(Callee); 1971 1972 // Add argument registers to the end of the list so that they are known live 1973 // into the call. 1974 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1975 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1976 RegsToPass[i].second.getValueType())); 1977 1978 if (InFlag.Val) 1979 Ops.push_back(InFlag); 1980 1981 Chain = DAG.getNode(X86ISD::CALL, 1982 NodeTys, &Ops[0], Ops.size()); 1983 InFlag = Chain.getValue(1); 1984 int NumBytesForCalleeToPush = 0; 1985 if (CC==CallingConv::Fast && PerformTailCallOpt) { 1986 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 1987 } else { 1988 NumBytesForCalleeToPush = 0; // Callee pops nothing. 1989 } 1990 // Returns a flag for retval copy to use. 1991 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1992 Ops.clear(); 1993 Ops.push_back(Chain); 1994 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy())); 1995 Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy())); 1996 Ops.push_back(InFlag); 1997 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1998 InFlag = Chain.getValue(1); 1999 2000 // Handle result values, copying them out of physregs into vregs that we 2001 // return. 2002 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 2003} 2004 2005 2006//===----------------------------------------------------------------------===// 2007// Other Lowering Hooks 2008//===----------------------------------------------------------------------===// 2009 2010 2011SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 2012 MachineFunction &MF = DAG.getMachineFunction(); 2013 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2014 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2015 2016 if (ReturnAddrIndex == 0) { 2017 // Set up a frame object for the return address. 
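    // The return address occupies one slot immediately below the current
    // frame: 8 bytes at offset -8 on x86-64, 4 bytes at offset -4 on x86-32.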
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
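/// (These are exactly the unsigned/unordered conditions, which test CF, ZF,
/// and PF; signed conditions such as COND_L or COND_G have no FP cmov form,
/// so hasFPCMov returns false for them.)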
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the half-open range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value is equal to the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
      return false;
  }

  return true;
}

/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  // Lower quadword shuffled.
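  // (e.g. <2, 0, 3, 1, 4, 5, 6, 7> is a valid PSHUFLW mask: the low four
  // entries may permute freely within 0-3 while the high four stay in place.)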
2208 for (unsigned i = 0; i != 4; ++i) 2209 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2210 return false; 2211 2212 return true; 2213} 2214 2215/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2216/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2217static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2218 if (NumElems != 2 && NumElems != 4) return false; 2219 2220 unsigned Half = NumElems / 2; 2221 for (unsigned i = 0; i < Half; ++i) 2222 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2223 return false; 2224 for (unsigned i = Half; i < NumElems; ++i) 2225 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2226 return false; 2227 2228 return true; 2229} 2230 2231bool X86::isSHUFPMask(SDNode *N) { 2232 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2233 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2234} 2235 2236/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2237/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2238/// half elements to come from vector 1 (which would equal the dest.) and 2239/// the upper half to come from vector 2. 2240static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2241 if (NumOps != 2 && NumOps != 4) return false; 2242 2243 unsigned Half = NumOps / 2; 2244 for (unsigned i = 0; i < Half; ++i) 2245 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2246 return false; 2247 for (unsigned i = Half; i < NumOps; ++i) 2248 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2249 return false; 2250 return true; 2251} 2252 2253static bool isCommutedSHUFP(SDNode *N) { 2254 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2255 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2256} 2257 2258/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2259/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2260bool X86::isMOVHLPSMask(SDNode *N) { 2261 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2262 2263 if (N->getNumOperands() != 4) 2264 return false; 2265 2266 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2267 return isUndefOrEqual(N->getOperand(0), 6) && 2268 isUndefOrEqual(N->getOperand(1), 7) && 2269 isUndefOrEqual(N->getOperand(2), 2) && 2270 isUndefOrEqual(N->getOperand(3), 3); 2271} 2272 2273/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2274/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2275/// <2, 3, 2, 3> 2276bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2277 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2278 2279 if (N->getNumOperands() != 4) 2280 return false; 2281 2282 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2283 return isUndefOrEqual(N->getOperand(0), 2) && 2284 isUndefOrEqual(N->getOperand(1), 3) && 2285 isUndefOrEqual(N->getOperand(2), 2) && 2286 isUndefOrEqual(N->getOperand(3), 3); 2287} 2288 2289/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2290/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 
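/// For v4f32 this means a mask like <4, 5, 2, 3>: the low half is taken from
/// V2 and the high half of V1 is left in place.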
2291bool X86::isMOVLPMask(SDNode *N) { 2292 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2293 2294 unsigned NumElems = N->getNumOperands(); 2295 if (NumElems != 2 && NumElems != 4) 2296 return false; 2297 2298 for (unsigned i = 0; i < NumElems/2; ++i) 2299 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2300 return false; 2301 2302 for (unsigned i = NumElems/2; i < NumElems; ++i) 2303 if (!isUndefOrEqual(N->getOperand(i), i)) 2304 return false; 2305 2306 return true; 2307} 2308 2309/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2310/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2311/// and MOVLHPS. 2312bool X86::isMOVHPMask(SDNode *N) { 2313 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2314 2315 unsigned NumElems = N->getNumOperands(); 2316 if (NumElems != 2 && NumElems != 4) 2317 return false; 2318 2319 for (unsigned i = 0; i < NumElems/2; ++i) 2320 if (!isUndefOrEqual(N->getOperand(i), i)) 2321 return false; 2322 2323 for (unsigned i = 0; i < NumElems/2; ++i) { 2324 SDOperand Arg = N->getOperand(i + NumElems/2); 2325 if (!isUndefOrEqual(Arg, i + NumElems)) 2326 return false; 2327 } 2328 2329 return true; 2330} 2331 2332/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2333/// specifies a shuffle of elements that is suitable for input to UNPCKL. 2334bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2335 bool V2IsSplat = false) { 2336 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2337 return false; 2338 2339 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2340 SDOperand BitI = Elts[i]; 2341 SDOperand BitI1 = Elts[i+1]; 2342 if (!isUndefOrEqual(BitI, j)) 2343 return false; 2344 if (V2IsSplat) { 2345 if (isUndefOrEqual(BitI1, NumElts)) 2346 return false; 2347 } else { 2348 if (!isUndefOrEqual(BitI1, j + NumElts)) 2349 return false; 2350 } 2351 } 2352 2353 return true; 2354} 2355 2356bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2357 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2358 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2359} 2360 2361/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2362/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2363bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2364 bool V2IsSplat = false) { 2365 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2366 return false; 2367 2368 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2369 SDOperand BitI = Elts[i]; 2370 SDOperand BitI1 = Elts[i+1]; 2371 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2372 return false; 2373 if (V2IsSplat) { 2374 if (isUndefOrEqual(BitI1, NumElts)) 2375 return false; 2376 } else { 2377 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2378 return false; 2379 } 2380 } 2381 2382 return true; 2383} 2384 2385bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2386 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2387 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2388} 2389 2390/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2391/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. 
vector_shuffle v, undef,
/// <0, 0, 1, 1>
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i + 1);

    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants: movss requires the lowest element to be the
/// lowest element of vector 2, and the remaining elements to come from
/// vector 1 in order.
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
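/// movshdup duplicates the odd lanes, so the only mask accepted here is
/// (modulo undefs) <1, 1, 3, 3>.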
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
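  // (e.g. <5, 5, 5, 5> splats element 1 of the second operand and is
  // rejected below; only indices smaller than NumElems are accepted.)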
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit, and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled; elements must come from the low half (0..3).
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
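  // (e.g. <3, 1, 0, 2, 5, 4, 7, 6> splits into PSHUFLW <3,1,0,2> for the low
  // half and PSHUFHW <5,4,7,6> for the high half.)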
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the values
/// in their permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
                                      SDOperand &V2, SDOperand &Mask,
                                      SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDOperand, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return ISD::isNON_EXTLoad(N);
  }
  return false;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation; we will try to use
  // a load-folding shufps instead.
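  // (A shufps with a memory operand folds the load into the shuffle, which
  // tends to be cheaper than a movlps plus a separate load.)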
2773 if (ISD::isNON_EXTLoad(V2)) 2774 return false; 2775 2776 unsigned NumElems = Mask->getNumOperands(); 2777 if (NumElems != 2 && NumElems != 4) 2778 return false; 2779 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2780 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2781 return false; 2782 for (unsigned i = NumElems/2; i != NumElems; ++i) 2783 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2784 return false; 2785 return true; 2786} 2787 2788/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2789/// all the same. 2790static bool isSplatVector(SDNode *N) { 2791 if (N->getOpcode() != ISD::BUILD_VECTOR) 2792 return false; 2793 2794 SDOperand SplatValue = N->getOperand(0); 2795 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2796 if (N->getOperand(i) != SplatValue) 2797 return false; 2798 return true; 2799} 2800 2801/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2802/// to an undef. 2803static bool isUndefShuffle(SDNode *N) { 2804 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2805 return false; 2806 2807 SDOperand V1 = N->getOperand(0); 2808 SDOperand V2 = N->getOperand(1); 2809 SDOperand Mask = N->getOperand(2); 2810 unsigned NumElems = Mask.getNumOperands(); 2811 for (unsigned i = 0; i != NumElems; ++i) { 2812 SDOperand Arg = Mask.getOperand(i); 2813 if (Arg.getOpcode() != ISD::UNDEF) { 2814 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2815 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2816 return false; 2817 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2818 return false; 2819 } 2820 } 2821 return true; 2822} 2823 2824/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2825/// constant +0.0. 2826static inline bool isZeroNode(SDOperand Elt) { 2827 return ((isa<ConstantSDNode>(Elt) && 2828 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2829 (isa<ConstantFPSDNode>(Elt) && 2830 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 2831} 2832 2833/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2834/// to an zero vector. 2835static bool isZeroShuffle(SDNode *N) { 2836 if (N->getOpcode() != ISD::VECTOR_SHUFFLE) 2837 return false; 2838 2839 SDOperand V1 = N->getOperand(0); 2840 SDOperand V2 = N->getOperand(1); 2841 SDOperand Mask = N->getOperand(2); 2842 unsigned NumElems = Mask.getNumOperands(); 2843 for (unsigned i = 0; i != NumElems; ++i) { 2844 SDOperand Arg = Mask.getOperand(i); 2845 if (Arg.getOpcode() != ISD::UNDEF) { 2846 unsigned Idx = cast<ConstantSDNode>(Arg)->getValue(); 2847 if (Idx < NumElems) { 2848 unsigned Opc = V1.Val->getOpcode(); 2849 if (Opc == ISD::UNDEF) 2850 continue; 2851 if (Opc != ISD::BUILD_VECTOR || 2852 !isZeroNode(V1.Val->getOperand(Idx))) 2853 return false; 2854 } else if (Idx >= NumElems) { 2855 unsigned Opc = V2.Val->getOpcode(); 2856 if (Opc == ISD::UNDEF) 2857 continue; 2858 if (Opc != ISD::BUILD_VECTOR || 2859 !isZeroNode(V2.Val->getOperand(Idx - NumElems))) 2860 return false; 2861 } 2862 } 2863 } 2864 return true; 2865} 2866 2867/// getZeroVector - Returns a vector of specified type with all zero elements. 2868/// 2869static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2870 assert(MVT::isVector(VT) && "Expected a vector type"); 2871 unsigned NumElems = MVT::getVectorNumElements(VT); 2872 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2873 bool isFP = MVT::isFloatingPoint(EVT); 2874 SDOperand Zero = isFP ? 
DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT); 2875 SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero); 2876 return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size()); 2877} 2878 2879/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2880/// that point to V2 points to its first element. 2881static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2882 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2883 2884 bool Changed = false; 2885 SmallVector<SDOperand, 8> MaskVec; 2886 unsigned NumElems = Mask.getNumOperands(); 2887 for (unsigned i = 0; i != NumElems; ++i) { 2888 SDOperand Arg = Mask.getOperand(i); 2889 if (Arg.getOpcode() != ISD::UNDEF) { 2890 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2891 if (Val > NumElems) { 2892 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2893 Changed = true; 2894 } 2895 } 2896 MaskVec.push_back(Arg); 2897 } 2898 2899 if (Changed) 2900 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2901 &MaskVec[0], MaskVec.size()); 2902 return Mask; 2903} 2904 2905/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2906/// operation of specified width. 2907static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2908 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2909 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2910 2911 SmallVector<SDOperand, 8> MaskVec; 2912 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2913 for (unsigned i = 1; i != NumElems; ++i) 2914 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2915 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2916} 2917 2918/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2919/// of specified width. 2920static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2921 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2922 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2923 SmallVector<SDOperand, 8> MaskVec; 2924 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2925 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2926 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2927 } 2928 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2929} 2930 2931/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2932/// of specified width. 2933static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2934 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2935 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 2936 unsigned Half = NumElems/2; 2937 SmallVector<SDOperand, 8> MaskVec; 2938 for (unsigned i = 0; i != Half; ++i) { 2939 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2940 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2941 } 2942 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2943} 2944 2945/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 
2946/// 2947static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2948 SDOperand V1 = Op.getOperand(0); 2949 SDOperand Mask = Op.getOperand(2); 2950 MVT::ValueType VT = Op.getValueType(); 2951 unsigned NumElems = Mask.getNumOperands(); 2952 Mask = getUnpacklMask(NumElems, DAG); 2953 while (NumElems != 4) { 2954 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2955 NumElems >>= 1; 2956 } 2957 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2958 2959 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2960 Mask = getZeroVector(MaskVT, DAG); 2961 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2962 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2963 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2964} 2965 2966/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2967/// vector against a zero or undef vector. 2968static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT, 2969 unsigned NumElems, unsigned Idx, 2970 bool isZero, SelectionDAG &DAG) { 2971 SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2972 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2973 MVT::ValueType EVT = MVT::getVectorElementType(MaskVT); 2974 SDOperand Zero = DAG.getConstant(0, EVT); 2975 SmallVector<SDOperand, 8> MaskVec(NumElems, Zero); 2976 MaskVec[Idx] = DAG.getConstant(NumElems, EVT); 2977 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2978 &MaskVec[0], MaskVec.size()); 2979 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2980} 2981 2982/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2983/// 2984static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2985 unsigned NumNonZero, unsigned NumZero, 2986 SelectionDAG &DAG, TargetLowering &TLI) { 2987 if (NumNonZero > 8) 2988 return SDOperand(); 2989 2990 SDOperand V(0, 0); 2991 bool First = true; 2992 for (unsigned i = 0; i < 16; ++i) { 2993 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2994 if (ThisIsNonZero && First) { 2995 if (NumZero) 2996 V = getZeroVector(MVT::v8i16, DAG); 2997 else 2998 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2999 First = false; 3000 } 3001 3002 if ((i & 1) != 0) { 3003 SDOperand ThisElt(0, 0), LastElt(0, 0); 3004 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 3005 if (LastIsNonZero) { 3006 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 3007 } 3008 if (ThisIsNonZero) { 3009 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 3010 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 3011 ThisElt, DAG.getConstant(8, MVT::i8)); 3012 if (LastIsNonZero) 3013 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 3014 } else 3015 ThisElt = LastElt; 3016 3017 if (ThisElt.Val) 3018 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 3019 DAG.getConstant(i/2, TLI.getPointerTy())); 3020 } 3021 } 3022 3023 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 3024} 3025 3026/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
3027/// 3028static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 3029 unsigned NumNonZero, unsigned NumZero, 3030 SelectionDAG &DAG, TargetLowering &TLI) { 3031 if (NumNonZero > 4) 3032 return SDOperand(); 3033 3034 SDOperand V(0, 0); 3035 bool First = true; 3036 for (unsigned i = 0; i < 8; ++i) { 3037 bool isNonZero = (NonZeros & (1 << i)) != 0; 3038 if (isNonZero) { 3039 if (First) { 3040 if (NumZero) 3041 V = getZeroVector(MVT::v8i16, DAG); 3042 else 3043 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 3044 First = false; 3045 } 3046 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 3047 DAG.getConstant(i, TLI.getPointerTy())); 3048 } 3049 } 3050 3051 return V; 3052} 3053 3054SDOperand 3055X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3056 // All zeros are handled with pxor. 3057 if (ISD::isBuildVectorAllZeros(Op.Val)) 3058 return Op; 3059 3060 // All ones are handled with pcmpeqd. 3061 if (ISD::isBuildVectorAllOnes(Op.Val)) 3062 return Op; 3063 3064 MVT::ValueType VT = Op.getValueType(); 3065 MVT::ValueType EVT = MVT::getVectorElementType(VT); 3066 unsigned EVTBits = MVT::getSizeInBits(EVT); 3067 3068 unsigned NumElems = Op.getNumOperands(); 3069 unsigned NumZero = 0; 3070 unsigned NumNonZero = 0; 3071 unsigned NonZeros = 0; 3072 unsigned NumNonZeroImms = 0; 3073 std::set<SDOperand> Values; 3074 for (unsigned i = 0; i < NumElems; ++i) { 3075 SDOperand Elt = Op.getOperand(i); 3076 if (Elt.getOpcode() != ISD::UNDEF) { 3077 Values.insert(Elt); 3078 if (isZeroNode(Elt)) 3079 NumZero++; 3080 else { 3081 NonZeros |= (1 << i); 3082 NumNonZero++; 3083 if (Elt.getOpcode() == ISD::Constant || 3084 Elt.getOpcode() == ISD::ConstantFP) 3085 NumNonZeroImms++; 3086 } 3087 } 3088 } 3089 3090 if (NumNonZero == 0) { 3091 if (NumZero == 0) 3092 // An all-undef vector. Return an UNDEF. 3093 return DAG.getNode(ISD::UNDEF, VT); 3094 else 3095 // A mix of zero and undef. Return a zero vector. 3096 return getZeroVector(VT, DAG); 3097 } 3098 3099 // Splat is obviously ok. Let the legalizer expand it to a shuffle. 3100 if (Values.size() == 1) 3101 return SDOperand(); 3102 3103 // Special case for single non-zero element. 3104 if (NumNonZero == 1) { 3105 unsigned Idx = CountTrailingZeros_32(NonZeros); 3106 SDOperand Item = Op.getOperand(Idx); 3107 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 3108 if (Idx == 0) 3109 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 3110 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 3111 NumZero > 0, DAG); 3112 3113 if (EVTBits == 32) { 3114 // Turn it into a shuffle of zero and zero-extended scalar to vector. 3115 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 3116 DAG); 3117 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3118 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3119 SmallVector<SDOperand, 8> MaskVec; 3120 for (unsigned i = 0; i < NumElems; i++) 3121 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 3122 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3123 &MaskVec[0], MaskVec.size()); 3124 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 3125 DAG.getNode(ISD::UNDEF, VT), Mask); 3126 } 3127 } 3128 3129 // A vector full of immediates; various special cases are already 3130 // handled, so this is best done with a single constant-pool load. 3131 if (NumNonZero == NumNonZeroImms) 3132 return SDOperand(); 3133 3134 // Let the legalizer expand 2-wide build_vectors.
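// (A 2-wide build_vector here is e.g. v2f64, or v2i64 on x86-64; with only two 64-bit lanes, the legalizer's generic expansion into scalar_to_vector plus a shuffle is arguably as good as anything we would build by hand.)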
3135 if (EVTBits == 64) 3136 return SDOperand(); 3137 3138 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3139 if (EVTBits == 8 && NumElems == 16) { 3140 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG, 3141 *this); 3142 if (V.Val) return V; 3143 } 3144 3145 if (EVTBits == 16 && NumElems == 8) { 3146 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG, 3147 *this); 3148 if (V.Val) return V; 3149 } 3150 3151 // If element VT is 32 bits, turn it into a number of shuffles. 3152 SmallVector<SDOperand, 8> V; 3153 V.resize(NumElems); 3154 if (NumElems == 4 && NumZero > 0) { 3155 for (unsigned i = 0; i < 4; ++i) { 3156 bool isZero = !(NonZeros & (1 << i)); 3157 if (isZero) 3158 V[i] = getZeroVector(VT, DAG); 3159 else 3160 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3161 } 3162 3163 for (unsigned i = 0; i < 2; ++i) { 3164 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 3165 default: break; 3166 case 0: 3167 V[i] = V[i*2]; // Must be a zero vector. 3168 break; 3169 case 1: 3170 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 3171 getMOVLMask(NumElems, DAG)); 3172 break; 3173 case 2: 3174 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3175 getMOVLMask(NumElems, DAG)); 3176 break; 3177 case 3: 3178 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 3179 getUnpacklMask(NumElems, DAG)); 3180 break; 3181 } 3182 } 3183 3184 // Take advantage of the fact that GR32 to VR128 scalar_to_vector (i.e. movd) 3185 // clears the upper bits. 3186 // FIXME: we can do the same for the v4f32 case when we know both parts of 3187 // the lower half come from scalar_to_vector (loadf32). We should do 3188 // that in post legalizer dag combiner with target specific hooks. 3189 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 3190 return V[0]; 3191 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3192 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3193 SmallVector<SDOperand, 8> MaskVec; 3194 bool Reverse = (NonZeros & 0x3) == 2; 3195 for (unsigned i = 0; i < 2; ++i) 3196 if (Reverse) 3197 MaskVec.push_back(DAG.getConstant(1-i, MaskEVT)); 3198 else 3199 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3200 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 3201 for (unsigned i = 0; i < 2; ++i) 3202 if (Reverse) 3203 MaskVec.push_back(DAG.getConstant(1-i+NumElems, MaskEVT)); 3204 else 3205 MaskVec.push_back(DAG.getConstant(i+NumElems, MaskEVT)); 3206 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3207 &MaskVec[0], MaskVec.size()); 3208 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 3209 } 3210 3211 if (Values.size() > 2) { 3212 // Expand into a number of unpckl*. 3213 // e.g.
for v4f32 3214 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 3215 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 3216 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 3217 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 3218 for (unsigned i = 0; i < NumElems; ++i) 3219 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 3220 NumElems >>= 1; 3221 while (NumElems != 0) { 3222 for (unsigned i = 0; i < NumElems; ++i) 3223 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 3224 UnpckMask); 3225 NumElems >>= 1; 3226 } 3227 return V[0]; 3228 } 3229 3230 return SDOperand(); 3231} 3232 3233SDOperand 3234X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3235 SDOperand V1 = Op.getOperand(0); 3236 SDOperand V2 = Op.getOperand(1); 3237 SDOperand PermMask = Op.getOperand(2); 3238 MVT::ValueType VT = Op.getValueType(); 3239 unsigned NumElems = PermMask.getNumOperands(); 3240 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3241 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3242 bool V1IsSplat = false; 3243 bool V2IsSplat = false; 3244 3245 if (isUndefShuffle(Op.Val)) 3246 return DAG.getNode(ISD::UNDEF, VT); 3247 3248 if (isZeroShuffle(Op.Val)) 3249 return getZeroVector(VT, DAG); 3250 3251 if (isIdentityMask(PermMask.Val)) 3252 return V1; 3253 else if (isIdentityMask(PermMask.Val, true)) 3254 return V2; 3255 3256 if (isSplatMask(PermMask.Val)) { 3257 if (NumElems <= 4) return Op; 3258 // Promote it to a v4i32 splat. 3259 return PromoteSplat(Op, DAG); 3260 } 3261 3262 if (X86::isMOVLMask(PermMask.Val)) 3263 return (V1IsUndef) ? V2 : Op; 3264 3265 if (X86::isMOVSHDUPMask(PermMask.Val) || 3266 X86::isMOVSLDUPMask(PermMask.Val) || 3267 X86::isMOVHLPSMask(PermMask.Val) || 3268 X86::isMOVHPMask(PermMask.Val) || 3269 X86::isMOVLPMask(PermMask.Val)) 3270 return Op; 3271 3272 if (ShouldXformToMOVHLPS(PermMask.Val) || 3273 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 3274 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3275 3276 bool Commuted = false; 3277 V1IsSplat = isSplatVector(V1.Val); 3278 V2IsSplat = isSplatVector(V2.Val); 3279 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 3280 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3281 std::swap(V1IsSplat, V2IsSplat); 3282 std::swap(V1IsUndef, V2IsUndef); 3283 Commuted = true; 3284 } 3285 3286 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 3287 if (V2IsUndef) return V1; 3288 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3289 if (V2IsSplat) { 3290 // V2 is a splat, so the mask may be malformed. That is, it may point 3291 // to any V2 element. The instruction selector won't like this. Get 3292 // a corrected mask and commute to form a proper MOVS{S|D}. 3293 SDOperand NewMask = getMOVLMask(NumElems, DAG); 3294 if (NewMask.Val != PermMask.Val) 3295 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3296 } 3297 return Op; 3298 } 3299 3300 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3301 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3302 X86::isUNPCKLMask(PermMask.Val) || 3303 X86::isUNPCKHMask(PermMask.Val)) 3304 return Op; 3305 3306 if (V2IsSplat) { 3307 // Normalize the mask so all entries that point to V2 point to its first 3308 // element, then try to match unpck{h|l} again. If they match, return a 3309 // new vector_shuffle with the corrected mask.
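// For illustration, with NumElems == 4 and a splat V2: a mask of <0, 6, 1, 5> normalizes to <0, 4, 1, 4>, i.e. every V2 reference now names V2's first element, and the shuffle can then be rebuilt with the canonical unpckl mask <0, 4, 1, 5>.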
3310 SDOperand NewMask = NormalizeMask(PermMask, DAG); 3311 if (NewMask.Val != PermMask.Val) { 3312 if (X86::isUNPCKLMask(PermMask.Val, true)) { 3313 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 3314 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3315 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 3316 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 3317 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 3318 } 3319 } 3320 } 3321 3322 // Normalize the node to match x86 shuffle ops if needed. 3323 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 3324 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3325 3326 if (Commuted) { 3327 // Commute it back and try unpck* again. 3328 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 3329 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 3330 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 3331 X86::isUNPCKLMask(PermMask.Val) || 3332 X86::isUNPCKHMask(PermMask.Val)) 3333 return Op; 3334 } 3335 3336 // If VT is integer, try PSHUF* first, then SHUFP*. 3337 if (MVT::isInteger(VT)) { 3338 // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically 3339 // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented. 3340 if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) && 3341 X86::isPSHUFDMask(PermMask.Val)) || 3342 X86::isPSHUFHWMask(PermMask.Val) || 3343 X86::isPSHUFLWMask(PermMask.Val)) { 3344 if (V2.getOpcode() != ISD::UNDEF) 3345 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3346 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3347 return Op; 3348 } 3349 3350 if (X86::isSHUFPMask(PermMask.Val) && 3351 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 3352 return Op; 3353 3354 // Handle v8i16 as a pair of shuffle high / shuffle low nodes. 3355 if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) { 3356 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 3357 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 3358 SmallVector<SDOperand, 8> MaskVec; 3359 for (unsigned i = 0; i != 4; ++i) 3360 MaskVec.push_back(PermMask.getOperand(i)); 3361 for (unsigned i = 4; i != 8; ++i) 3362 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3363 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3364 &MaskVec[0], MaskVec.size()); 3365 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3366 MaskVec.clear(); 3367 for (unsigned i = 0; i != 4; ++i) 3368 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3369 for (unsigned i = 4; i != 8; ++i) 3370 MaskVec.push_back(PermMask.getOperand(i)); 3371 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size()); 3372 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 3373 } 3374 } else { 3375 // Floating point cases in the other order. 3376 if (X86::isSHUFPMask(PermMask.Val)) 3377 return Op; 3378 if (X86::isPSHUFDMask(PermMask.Val) || 3379 X86::isPSHUFHWMask(PermMask.Val) || 3380 X86::isPSHUFLWMask(PermMask.Val)) { 3381 if (V2.getOpcode() != ISD::UNDEF) 3382 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 3383 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 3384 return Op; 3385 } 3386 } 3387 3388 if (NumElems == 4 && 3389 // Don't do this for MMX.
3390 MVT::getSizeInBits(VT) != 64) { 3391 MVT::ValueType MaskVT = PermMask.getValueType(); 3392 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 3393 SmallVector<std::pair<int, int>, 8> Locs; 3394 Locs.resize(NumElems); 3395 SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3396 SmallVector<SDOperand, 8> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3397 unsigned NumHi = 0; 3398 unsigned NumLo = 0; 3399 // If no more than two elements come from either vector, this can be 3400 // implemented with two shuffles: the first gathers the elements, and 3401 // the second, which takes the first shuffle as both of its 3402 // vector operands, puts the elements into the right order. 3403 for (unsigned i = 0; i != NumElems; ++i) { 3404 SDOperand Elt = PermMask.getOperand(i); 3405 if (Elt.getOpcode() == ISD::UNDEF) { 3406 Locs[i] = std::make_pair(-1, -1); 3407 } else { 3408 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 3409 if (Val < NumElems) { 3410 Locs[i] = std::make_pair(0, NumLo); 3411 Mask1[NumLo] = Elt; 3412 NumLo++; 3413 } else { 3414 Locs[i] = std::make_pair(1, NumHi); 3415 if (2+NumHi < NumElems) 3416 Mask1[2+NumHi] = Elt; 3417 NumHi++; 3418 } 3419 } 3420 } 3421 if (NumLo <= 2 && NumHi <= 2) { 3422 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3423 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3424 &Mask1[0], Mask1.size())); 3425 for (unsigned i = 0; i != NumElems; ++i) { 3426 if (Locs[i].first == -1) 3427 continue; 3428 else { 3429 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 3430 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 3431 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 3432 } 3433 } 3434 3435 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 3436 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3437 &Mask2[0], Mask2.size())); 3438 } 3439 3440 // Break it into (shuffle shuffle_hi, shuffle_lo).
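// Worked example for NumElems == 4: PermMask <0, 1, 2, 4> gathers into LoMask <0, 1, u, u> and HiMask <2, u, 4, u>; the final shuffle of (LoShuffle, HiShuffle) with mask <0, 1, 4, 6> reassembles <V1[0], V1[1], V1[2], V2[0]>.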
3441 Locs.clear(); Locs.resize(NumElems); 3442 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3443 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 3444 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 3445 unsigned MaskIdx = 0; 3446 unsigned LoIdx = 0; 3447 unsigned HiIdx = NumElems/2; 3448 for (unsigned i = 0; i != NumElems; ++i) { 3449 if (i == NumElems/2) { 3450 MaskPtr = &HiMask; 3451 MaskIdx = 1; 3452 LoIdx = 0; 3453 HiIdx = NumElems/2; 3454 } 3455 SDOperand Elt = PermMask.getOperand(i); 3456 if (Elt.getOpcode() == ISD::UNDEF) { 3457 Locs[i] = std::make_pair(-1, -1); 3458 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 3459 Locs[i] = std::make_pair(MaskIdx, LoIdx); 3460 (*MaskPtr)[LoIdx] = Elt; 3461 LoIdx++; 3462 } else { 3463 Locs[i] = std::make_pair(MaskIdx, HiIdx); 3464 (*MaskPtr)[HiIdx] = Elt; 3465 HiIdx++; 3466 } 3467 } 3468 3469 SDOperand LoShuffle = 3470 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3471 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3472 &LoMask[0], LoMask.size())); 3473 SDOperand HiShuffle = 3474 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 3475 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3476 &HiMask[0], HiMask.size())); 3477 SmallVector<SDOperand, 8> MaskOps; 3478 for (unsigned i = 0; i != NumElems; ++i) { 3479 if (Locs[i].first == -1) { 3480 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 3481 } else { 3482 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 3483 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 3484 } 3485 } 3486 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 3487 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3488 &MaskOps[0], MaskOps.size())); 3489 } 3490 3491 return SDOperand(); 3492} 3493 3494SDOperand 3495X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3496 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3497 return SDOperand(); 3498 3499 MVT::ValueType VT = Op.getValueType(); 3500 // TODO: handle v16i8. 3501 if (MVT::getSizeInBits(VT) == 16) { 3502 // Transform it so it matches pextrw, which produces a 32-bit result. 3503 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 3504 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 3505 Op.getOperand(0), Op.getOperand(1)); 3506 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 3507 DAG.getValueType(VT)); 3508 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 3509 } else if (MVT::getSizeInBits(VT) == 32) { 3510 SDOperand Vec = Op.getOperand(0); 3511 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3512 if (Idx == 0) 3513 return Op; 3514 // SHUFPS the element to the lowest double word, then movss. 3515 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3516 SmallVector<SDOperand, 8> IdxVec; 3517 IdxVec. 3518 push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT))); 3519 IdxVec. 3520 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3521 IdxVec. 3522 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3523 IdxVec.
3524 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3525 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3526 &IdxVec[0], IdxVec.size()); 3527 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3528 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3529 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3530 DAG.getConstant(0, getPointerTy())); 3531 } else if (MVT::getSizeInBits(VT) == 64) { 3532 SDOperand Vec = Op.getOperand(0); 3533 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3534 if (Idx == 0) 3535 return Op; 3536 3537 // UNPCKHPD the element to the lowest double word, then movsd. 3538 // Note if the lower 64 bits of the result of the UNPCKHPD are then stored 3539 // to an f64mem, the whole operation is folded into a single MOVHPDmr. 3540 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2); 3541 SmallVector<SDOperand, 8> IdxVec; 3542 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT))); 3543 IdxVec. 3544 push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT))); 3545 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3546 &IdxVec[0], IdxVec.size()); 3547 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 3548 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 3549 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 3550 DAG.getConstant(0, getPointerTy())); 3551 } 3552 3553 return SDOperand(); 3554} 3555 3556SDOperand 3557X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 3558 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32 3559 // as its second argument. 3560 MVT::ValueType VT = Op.getValueType(); 3561 MVT::ValueType BaseVT = MVT::getVectorElementType(VT); 3562 SDOperand N0 = Op.getOperand(0); 3563 SDOperand N1 = Op.getOperand(1); 3564 SDOperand N2 = Op.getOperand(2); 3565 if (MVT::getSizeInBits(BaseVT) == 16) { 3566 if (N1.getValueType() != MVT::i32) 3567 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 3568 if (N2.getValueType() != MVT::i32) 3569 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),getPointerTy()); 3570 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 3571 } else if (MVT::getSizeInBits(BaseVT) == 32) { 3572 unsigned Idx = cast<ConstantSDNode>(N2)->getValue(); 3573 if (Idx == 0) { 3574 // Use a movss. 3575 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1); 3576 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 3577 MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT); 3578 SmallVector<SDOperand, 8> MaskVec; 3579 MaskVec.push_back(DAG.getConstant(4, BaseVT)); 3580 for (unsigned i = 1; i <= 3; ++i) 3581 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 3582 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1, 3583 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3584 &MaskVec[0], MaskVec.size())); 3585 } else { 3586 // Use two pinsrw instructions to insert a 32-bit value.
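// For illustration, inserting at v4i32 index 2: the value occupies v8i16 lanes 4 and 5, so the low 16 bits go in with a pinsrw at lane 4 and, after a 16-bit right shift, the high 16 bits go in with a pinsrw at lane 5.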
3587 Idx <<= 1; 3588 if (MVT::isFloatingPoint(N1.getValueType())) { 3589 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1); 3590 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1); 3591 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1, 3592 DAG.getConstant(0, getPointerTy())); 3593 } 3594 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0); 3595 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3596 DAG.getConstant(Idx, getPointerTy())); 3597 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8)); 3598 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 3599 DAG.getConstant(Idx+1, getPointerTy())); 3600 return DAG.getNode(ISD::BIT_CONVERT, VT, N0); 3601 } 3602 } 3603 3604 return SDOperand(); 3605} 3606 3607SDOperand 3608X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 3609 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 3610 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 3611} 3612 3613// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3614// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is 3615// one of the above-mentioned nodes. It has to be wrapped because otherwise 3616// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3617// be used to form an addressing mode. These wrapped nodes will be selected 3618// into MOV32ri. 3619SDOperand 3620X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 3621 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3622 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 3623 getPointerTy(), 3624 CP->getAlignment()); 3625 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3626 // With PIC, the address is actually $g + Offset. 3627 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3628 !Subtarget->isPICStyleRIPRel()) { 3629 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3630 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3631 Result); 3632 } 3633 3634 return Result; 3635} 3636 3637SDOperand 3638X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 3639 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3640 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 3641 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3642 // With PIC, the address is actually $g + Offset. 3643 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3644 !Subtarget->isPICStyleRIPRel()) { 3645 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3646 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3647 Result); 3648 } 3649 3650 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 3651 // load the value at address GV, not the value of GV itself. This means that 3652 // the GlobalAddress must be in the base or index register of the address, not 3653 // the GV offset field.
The platform check is inside the GVRequiresExtraLoad() call. 3654 // The same applies for external symbols during PIC codegen. 3655 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 3656 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 3657 3658 return Result; 3659} 3660 3661// Lower ISD::GlobalTLSAddress using the "general dynamic" model 3662static SDOperand 3663LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3664 const MVT::ValueType PtrVT) { 3665 SDOperand InFlag; 3666 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 3667 DAG.getNode(X86ISD::GlobalBaseReg, 3668 PtrVT), InFlag); 3669 InFlag = Chain.getValue(1); 3670 3671 // emit leal symbol@TLSGD(,%ebx,1), %eax 3672 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3673 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3674 GA->getValueType(0), 3675 GA->getOffset()); 3676 SDOperand Ops[] = { Chain, TGA, InFlag }; 3677 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3678 InFlag = Result.getValue(2); 3679 Chain = Result.getValue(1); 3680 3681 // call ___tls_get_addr. This function receives its argument in 3682 // the register EAX. 3683 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3684 InFlag = Chain.getValue(1); 3685 3686 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3687 SDOperand Ops1[] = { Chain, 3688 DAG.getTargetExternalSymbol("___tls_get_addr", 3689 PtrVT), 3690 DAG.getRegister(X86::EAX, PtrVT), 3691 DAG.getRegister(X86::EBX, PtrVT), 3692 InFlag }; 3693 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3694 InFlag = Chain.getValue(1); 3695 3696 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3697} 3698 3699// Lower ISD::GlobalTLSAddress using the "initial exec" (for non-PIC) or 3700// "local exec" model. 3701static SDOperand 3702LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3703 const MVT::ValueType PtrVT) { 3704 // Get the Thread Pointer 3705 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 3706 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 3707 // exec) 3708 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3709 GA->getValueType(0), 3710 GA->getOffset()); 3711 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 3712 3713 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 3714 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0); 3715 3716 // The address of the thread local variable is the sum of the thread 3717 // pointer and the offset of the variable.
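// (Concretely: for local exec the offset is the link-time constant x@ntpoff, while for initial exec x@indntpoff names a GOT slot that holds the offset, hence the extra load above when the global is only a declaration.)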
3718 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 3719} 3720 3721SDOperand 3722X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 3723 // TODO: implement the "local dynamic" model 3724 // TODO: implement the "initial exec" model for PIC executables 3725 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 3726 "TLS not implemented for non-ELF and 64-bit targets"); 3727 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3728 // If the relocation model is PIC, use the "General Dynamic" TLS model, 3729 // otherwise use the "Local Exec" TLS model. 3730 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 3731 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 3732 else 3733 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 3734} 3735 3736SDOperand 3737X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3738 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3739 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3740 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3741 // With PIC, the address is actually $g + Offset. 3742 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3743 !Subtarget->isPICStyleRIPRel()) { 3744 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3745 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3746 Result); 3747 } 3748 3749 return Result; 3750} 3751 3752SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 3753 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3754 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 3755 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3756 // With PIC, the address is actually $g + Offset. 3757 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3758 !Subtarget->isPICStyleRIPRel()) { 3759 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3760 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3761 Result); 3762 } 3763 3764 return Result; 3765} 3766 3767SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3768 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3769 "Not an i64 shift!"); 3770 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3771 SDOperand ShOpLo = Op.getOperand(0); 3772 SDOperand ShOpHi = Op.getOperand(1); 3773 SDOperand ShAmt = Op.getOperand(2); 3774 SDOperand Tmp1 = isSRA ? 3775 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3776 DAG.getConstant(0, MVT::i32); 3777 3778 SDOperand Tmp2, Tmp3; 3779 if (Op.getOpcode() == ISD::SHL_PARTS) { 3780 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3781 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3782 } else { 3783 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3784 Tmp3 = DAG.getNode(isSRA ?
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3785 } 3786 3787 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3788 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3789 DAG.getConstant(32, MVT::i8)); 3790 SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, 3791 AndNode, DAG.getConstant(0, MVT::i8)); 3792 3793 SDOperand Hi, Lo; 3794 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3795 unsigned Opc = X86ISD::CMOV; 3796 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3797 SmallVector<SDOperand, 4> Ops; 3798 if (Op.getOpcode() == ISD::SHL_PARTS) { 3799 Ops.push_back(Tmp2); 3800 Ops.push_back(Tmp3); 3801 Ops.push_back(CC); 3802 Ops.push_back(Cond); 3803 Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); 3804 3805 Ops.clear(); 3806 Ops.push_back(Tmp3); 3807 Ops.push_back(Tmp1); 3808 Ops.push_back(CC); 3809 Ops.push_back(Cond); 3810 Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); 3811 } else { 3812 Ops.push_back(Tmp2); 3813 Ops.push_back(Tmp3); 3814 Ops.push_back(CC); 3815 Ops.push_back(Cond); 3816 Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); 3817 3818 Ops.clear(); 3819 Ops.push_back(Tmp3); 3820 Ops.push_back(Tmp1); 3821 Ops.push_back(CC); 3822 Ops.push_back(Cond); 3823 Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); 3824 } 3825 3826 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3827 Ops.clear(); 3828 Ops.push_back(Lo); 3829 Ops.push_back(Hi); 3830 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3831} 3832 3833SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3834 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3835 Op.getOperand(0).getValueType() >= MVT::i16 && 3836 "Unknown SINT_TO_FP to lower!"); 3837 3838 SDOperand Result; 3839 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3840 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3841 MachineFunction &MF = DAG.getMachineFunction(); 3842 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3843 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3844 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3845 StackSlot, NULL, 0); 3846 3847 // These are really Legal; caller falls through into that case. 3848 if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32) 3849 return Result; 3850 if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64) 3851 return Result; 3852 if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 && 3853 Subtarget->is64Bit()) 3854 return Result; 3855 3856 // Build the FILD 3857 SDVTList Tys; 3858 bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) || 3859 (X86ScalarSSEf64 && Op.getValueType() == MVT::f64); 3860 if (useSSE) 3861 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 3862 else 3863 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 3864 SmallVector<SDOperand, 8> Ops; 3865 Ops.push_back(Chain); 3866 Ops.push_back(StackSlot); 3867 Ops.push_back(DAG.getValueType(SrcVT)); 3868 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 3869 Tys, &Ops[0], Ops.size()); 3870 3871 if (useSSE) { 3872 Chain = Result.getValue(1); 3873 SDOperand InFlag = Result.getValue(2); 3874 3875 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 3876 // shouldn't be necessary except that RFP cannot be live across 3877 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
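// The resulting sequence for i64 -> f64 with SSE2 is, roughly: fild the i64 stack slot onto the x87 stack, fstp it to a second slot as f64, then load that slot into an XMM register.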
3878 MachineFunction &MF = DAG.getMachineFunction(); 3879 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 3880 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3881 Tys = DAG.getVTList(MVT::Other); 3882 SmallVector<SDOperand, 8> Ops; 3883 Ops.push_back(Chain); 3884 Ops.push_back(Result); 3885 Ops.push_back(StackSlot); 3886 Ops.push_back(DAG.getValueType(Op.getValueType())); 3887 Ops.push_back(InFlag); 3888 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 3889 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 3890 } 3891 3892 return Result; 3893} 3894 3895SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 3896 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 3897 "Unknown FP_TO_SINT to lower!"); 3898 SDOperand Result; 3899 3900 // These are really Legal. 3901 if (Op.getValueType() == MVT::i32 && 3902 X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) 3903 return Result; 3904 if (Op.getValueType() == MVT::i32 && 3905 X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64) 3906 return Result; 3907 if (Subtarget->is64Bit() && 3908 Op.getValueType() == MVT::i64 && 3909 Op.getOperand(0).getValueType() != MVT::f80) 3910 return Result; 3911 3912 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 3913 // stack slot. 3914 MachineFunction &MF = DAG.getMachineFunction(); 3915 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 3916 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3917 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3918 unsigned Opc; 3919 switch (Op.getValueType()) { 3920 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 3921 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 3922 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 3923 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 3924 } 3925 3926 SDOperand Chain = DAG.getEntryNode(); 3927 SDOperand Value = Op.getOperand(0); 3928 if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) || 3929 (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) { 3930 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 3931 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 3932 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 3933 SDOperand Ops[] = { 3934 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 3935 }; 3936 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 3937 Chain = Value.getValue(1); 3938 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3939 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3940 } 3941 3942 // Build the FP_TO_INT*_IN_MEM 3943 SDOperand Ops[] = { Chain, Value, StackSlot }; 3944 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 3945 3946 // Load the result. 
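// E.g. for f64 -> i64 with SSE2, the sequence built above is, roughly: store the XMM value to a slot, fld it onto the x87 stack, fistp it into a second slot via FP_TO_INT64_IN_MEM, and reload it below as an i64.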
3947 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 3948} 3949 3950SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 3951 MVT::ValueType VT = Op.getValueType(); 3952 MVT::ValueType EltVT = VT; 3953 if (MVT::isVector(VT)) 3954 EltVT = MVT::getVectorElementType(VT); 3955 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 3956 std::vector<Constant*> CV; 3957 if (EltVT == MVT::f64) { 3958 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 3959 CV.push_back(C); 3960 CV.push_back(C); 3961 } else { 3962 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 3963 CV.push_back(C); 3964 CV.push_back(C); 3965 CV.push_back(C); 3966 CV.push_back(C); 3967 } 3968 Constant *C = ConstantVector::get(CV); 3969 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 3970 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 3971 false, 16); 3972 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 3973} 3974 3975SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 3976 MVT::ValueType VT = Op.getValueType(); 3977 MVT::ValueType EltVT = VT; 3978 unsigned EltNum = 1; 3979 if (MVT::isVector(VT)) { 3980 EltVT = MVT::getVectorElementType(VT); 3981 EltNum = MVT::getVectorNumElements(VT); 3982 } 3983 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 3984 std::vector<Constant*> CV; 3985 if (EltVT == MVT::f64) { 3986 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 3987 CV.push_back(C); 3988 CV.push_back(C); 3989 } else { 3990 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 3991 CV.push_back(C); 3992 CV.push_back(C); 3993 CV.push_back(C); 3994 CV.push_back(C); 3995 } 3996 Constant *C = ConstantVector::get(CV); 3997 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 3998 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 3999 false, 16); 4000 if (MVT::isVector(VT)) { 4001 return DAG.getNode(ISD::BIT_CONVERT, VT, 4002 DAG.getNode(ISD::XOR, MVT::v2i64, 4003 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4004 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4005 } else { 4006 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4007 } 4008} 4009 4010SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4011 SDOperand Op0 = Op.getOperand(0); 4012 SDOperand Op1 = Op.getOperand(1); 4013 MVT::ValueType VT = Op.getValueType(); 4014 MVT::ValueType SrcVT = Op1.getValueType(); 4015 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4016 4017 // If second operand is smaller, extend it first. 4018 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4019 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4020 SrcVT = VT; 4021 SrcTy = MVT::getTypeForValueType(SrcVT); 4022 } 4023 4024 // First get the sign bit of second operand. 
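// Overall this computes copysign(x, y) as (x & ~SignMask) | (y & SignMask), with both masks materialized as constant-pool vectors so the whole computation can stay in the SSE domain.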
4025 std::vector<Constant*> CV; 4026 if (SrcVT == MVT::f64) { 4027 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4028 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4029 } else { 4030 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4031 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4032 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4033 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4034 } 4035 Constant *C = ConstantVector::get(CV); 4036 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4037 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0, 4038 false, 16); 4039 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4040 4041 // Shift sign bit right or left if the two operands have different types. 4042 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4043 // Op0 is MVT::f32, Op1 is MVT::f64. 4044 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4045 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4046 DAG.getConstant(32, MVT::i32)); 4047 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4048 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4049 DAG.getConstant(0, getPointerTy())); 4050 } 4051 4052 // Clear first operand sign bit. 4053 CV.clear(); 4054 if (VT == MVT::f64) { 4055 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4056 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4057 } else { 4058 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4059 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4060 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4061 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4062 } 4063 C = ConstantVector::get(CV); 4064 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4065 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0, 4066 false, 16); 4067 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4068 4069 // Or the value with the sign bit. 
4070 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4071} 4072 4073SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4074 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4075 SDOperand Cond; 4076 SDOperand Op0 = Op.getOperand(0); 4077 SDOperand Op1 = Op.getOperand(1); 4078 SDOperand CC = Op.getOperand(2); 4079 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4080 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4081 unsigned X86CC; 4082 4083 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4084 Op0, Op1, DAG)) { 4085 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4086 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4087 DAG.getConstant(X86CC, MVT::i8), Cond); 4088 } 4089 4090 assert(isFP && "Illegal integer SetCC!"); 4091 4092 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4093 switch (SetCCOpcode) { 4094 default: assert(false && "Illegal floating point SetCC!"); 4095 case ISD::SETOEQ: { // !PF & ZF 4096 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4097 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4098 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4099 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4100 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4101 } 4102 case ISD::SETUNE: { // PF | !ZF 4103 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4104 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4105 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4106 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4107 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4108 } 4109 } 4110} 4111 4112 4113SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4114 bool addTest = true; 4115 SDOperand Cond = Op.getOperand(0); 4116 SDOperand CC; 4117 4118 if (Cond.getOpcode() == ISD::SETCC) 4119 Cond = LowerSETCC(Cond, DAG); 4120 4121 // If the condition flag is set by an X86ISD::CMP, then use it as the 4122 // condition-setting operand in place of the X86ISD::SETCC. 4123 if (Cond.getOpcode() == X86ISD::SETCC) { 4124 CC = Cond.getOperand(0); 4125 4126 SDOperand Cmp = Cond.getOperand(1); 4127 unsigned Opc = Cmp.getOpcode(); 4128 MVT::ValueType VT = Op.getValueType(); 4129 bool IllegalFPCMov = false; 4130 if (VT == MVT::f32 && !X86ScalarSSEf32) 4131 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4132 else if (VT == MVT::f64 && !X86ScalarSSEf64) 4133 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4134 if ((Opc == X86ISD::CMP || 4135 Opc == X86ISD::COMI || 4136 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4137 Cond = Cmp; 4138 addTest = false; 4139 } 4140 } 4141 4142 if (addTest) { 4143 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4144 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4145 } 4146 4147 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4148 MVT::Flag); 4149 SmallVector<SDOperand, 4> Ops; 4150 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4151 // condition is true.
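// I.e. for select(Cond, T, F) the operand order pushed below is (F, T, CC, Flags), and the CMOV yields T when the CC condition holds and F otherwise.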
4152 Ops.push_back(Op.getOperand(2)); 4153 Ops.push_back(Op.getOperand(1)); 4154 Ops.push_back(CC); 4155 Ops.push_back(Cond); 4156 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 4157} 4158 4159SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 4160 bool addTest = true; 4161 SDOperand Chain = Op.getOperand(0); 4162 SDOperand Cond = Op.getOperand(1); 4163 SDOperand Dest = Op.getOperand(2); 4164 SDOperand CC; 4165 4166 if (Cond.getOpcode() == ISD::SETCC) 4167 Cond = LowerSETCC(Cond, DAG); 4168 4169 // If the condition flag is set by an X86ISD::CMP, then use it as the 4170 // condition-setting operand in place of the X86ISD::SETCC. 4171 if (Cond.getOpcode() == X86ISD::SETCC) { 4172 CC = Cond.getOperand(0); 4173 4174 SDOperand Cmp = Cond.getOperand(1); 4175 unsigned Opc = Cmp.getOpcode(); 4176 if (Opc == X86ISD::CMP || 4177 Opc == X86ISD::COMI || 4178 Opc == X86ISD::UCOMI) { 4179 Cond = Cmp; 4180 addTest = false; 4181 } 4182 } 4183 4184 if (addTest) { 4185 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4186 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4187 } 4188 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 4189 Chain, Dest, CC, Cond); 4190} 4191 4192SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 4193 unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4194 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; 4195 4196 if (Subtarget->is64Bit()) { 4197 if (CallingConv == CallingConv::Fast && isTailCall && PerformTailCallOpt) 4198 return LowerX86_TailCallTo(Op, DAG, CallingConv); 4199 else 4200 return LowerX86_64CCCCallTo(Op, DAG, CallingConv); 4201 } else 4202 switch (CallingConv) { 4203 default: 4204 assert(0 && "Unsupported calling convention"); 4205 case CallingConv::Fast: 4206 if (isTailCall && PerformTailCallOpt) 4207 return LowerX86_TailCallTo(Op, DAG, CallingConv); 4208 else 4209 return LowerCCCCallTo(Op, DAG, CallingConv); 4210 case CallingConv::C: 4211 case CallingConv::X86_StdCall: 4212 return LowerCCCCallTo(Op, DAG, CallingConv); 4213 case CallingConv::X86_FastCall: 4214 return LowerFastCCCallTo(Op, DAG, CallingConv); 4215 } 4216} 4217 4218 4219// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets. 4220// Calls to _alloca are needed to probe the stack when allocating more than 4K 4221// bytes in one go. Touching the stack at 4K increments is necessary to ensure 4222// that the guard pages used by the OS virtual memory manager are allocated in 4223// the correct sequence. 4224SDOperand 4225X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 4226 SelectionDAG &DAG) { 4227 assert(Subtarget->isTargetCygMing() && 4228 "This should be used only on Cygwin/Mingw targets"); 4229 4230 // Get the inputs. 4231 SDOperand Chain = Op.getOperand(0); 4232 SDOperand Size = Op.getOperand(1); 4233 // FIXME: Ensure alignment here 4234 4235 SDOperand Flag; 4236 4237 MVT::ValueType IntPtr = getPointerTy(); 4238 MVT::ValueType SPTy = (Subtarget->is64Bit() ?
MVT::i64 : MVT::i32); 4239 4240 Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag); 4241 Flag = Chain.getValue(1); 4242 4243 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 4244 SDOperand Ops[] = { Chain, 4245 DAG.getTargetExternalSymbol("_alloca", IntPtr), 4246 DAG.getRegister(X86::EAX, IntPtr), 4247 Flag }; 4248 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4); 4249 Flag = Chain.getValue(1); 4250 4251 Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); 4252 4253 std::vector<MVT::ValueType> Tys; 4254 Tys.push_back(SPTy); 4255 Tys.push_back(MVT::Other); 4256 SDOperand Ops1[2] = { Chain.getValue(0), Chain }; 4257 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2); 4258} 4259 4260SDOperand 4261X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 4262 MachineFunction &MF = DAG.getMachineFunction(); 4263 const Function* Fn = MF.getFunction(); 4264 if (Fn->hasExternalLinkage() && 4265 Subtarget->isTargetCygMing() && 4266 Fn->getName() == "main") 4267 MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true); 4268 4269 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 4270 if (Subtarget->is64Bit()) 4271 return LowerX86_64CCCArguments(Op, DAG); 4272 else 4273 switch (CC) { 4274 default: 4275 assert(0 && "Unsupported calling convention"); 4276 case CallingConv::Fast: 4277 return LowerCCCArguments(Op, DAG, true); 4278 4279 case CallingConv::C: 4280 return LowerCCCArguments(Op, DAG); 4281 case CallingConv::X86_StdCall: 4282 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall); 4283 return LowerCCCArguments(Op, DAG, true); 4284 case CallingConv::X86_FastCall: 4285 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall); 4286 return LowerFastCCArguments(Op, DAG); 4287 } 4288} 4289 4290SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 4291 SDOperand InFlag(0, 0); 4292 SDOperand Chain = Op.getOperand(0); 4293 unsigned Align = 4294 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 4295 if (Align == 0) Align = 1; 4296 4297 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4298 // If not DWORD aligned or size is more than the threshold, call memset. 4299 // The libc version is likely to be faster for these cases. It can use the 4300 // address value and runtime information about the CPU. 4301 if ((Align & 3) != 0 || 4302 (I && I->getValue() > Subtarget->getMinRepStrSizeThreshold())) { 4303 MVT::ValueType IntPtr = getPointerTy(); 4304 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4305 TargetLowering::ArgListTy Args; 4306 TargetLowering::ArgListEntry Entry; 4307 Entry.Node = Op.getOperand(1); 4308 Entry.Ty = IntPtrTy; 4309 Args.push_back(Entry); 4310 // Extend the unsigned i8 argument to be an int value for the call.
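// (The call built here is effectively memset(dst, (int)val, size); the i8 value is widened because the C library's memset takes an int.)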
4311 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4312 Entry.Ty = IntPtrTy; 4313 Args.push_back(Entry); 4314 Entry.Node = Op.getOperand(3); 4315 Args.push_back(Entry); 4316 std::pair<SDOperand,SDOperand> CallResult = 4317 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 4318 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4319 return CallResult.second; 4320 } 4321 4322 MVT::ValueType AVT; 4323 SDOperand Count; 4324 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4325 unsigned BytesLeft = 0; 4326 bool TwoRepStos = false; 4327 if (ValC) { 4328 unsigned ValReg; 4329 uint64_t Val = ValC->getValue() & 255; 4330 4331 // If the value is a constant, then we can potentially use wider stores. 4332 switch (Align & 3) { 4333 case 2: // WORD aligned 4334 AVT = MVT::i16; 4335 ValReg = X86::AX; 4336 Val = (Val << 8) | Val; 4337 break; 4338 case 0: // DWORD aligned 4339 AVT = MVT::i32; 4340 ValReg = X86::EAX; 4341 Val = (Val << 8) | Val; 4342 Val = (Val << 16) | Val; 4343 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // 16-byte aligned; use QWORD stores 4344 AVT = MVT::i64; 4345 ValReg = X86::RAX; 4346 Val = (Val << 32) | Val; 4347 } 4348 break; 4349 default: // Byte aligned 4350 AVT = MVT::i8; 4351 ValReg = X86::AL; 4352 Count = Op.getOperand(3); 4353 break; 4354 } 4355 4356 if (AVT > MVT::i8) { 4357 if (I) { 4358 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4359 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 4360 BytesLeft = I->getValue() % UBytes; 4361 } else { 4362 assert(AVT >= MVT::i32 && 4363 "Do not use rep;stos if not at least DWORD aligned"); 4364 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4365 Op.getOperand(3), DAG.getConstant((AVT == MVT::i64) ? 3 : 2, MVT::i8)); 4366 TwoRepStos = true; 4367 } 4368 } 4369 4370 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4371 InFlag); 4372 InFlag = Chain.getValue(1); 4373 } else { 4374 AVT = MVT::i8; 4375 Count = Op.getOperand(3); 4376 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4377 InFlag = Chain.getValue(1); 4378 } 4379 4380 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4381 Count, InFlag); 4382 InFlag = Chain.getValue(1); 4383 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4384 Op.getOperand(1), InFlag); 4385 InFlag = Chain.getValue(1); 4386 4387 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4388 SmallVector<SDOperand, 8> Ops; 4389 Ops.push_back(Chain); 4390 Ops.push_back(DAG.getValueType(AVT)); 4391 Ops.push_back(InFlag); 4392 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4393 4394 if (TwoRepStos) { 4395 InFlag = Chain.getValue(1); 4396 Count = Op.getOperand(3); 4397 MVT::ValueType CVT = Count.getValueType(); 4398 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4399 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4400 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 4401 Left, InFlag); 4402 InFlag = Chain.getValue(1); 4403 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4404 Ops.clear(); 4405 Ops.push_back(Chain); 4406 Ops.push_back(DAG.getValueType(MVT::i8)); 4407 Ops.push_back(InFlag); 4408 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4409 } else if (BytesLeft) { 4410 // Issue stores for the last 1 - 7 bytes.
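// Worked example: a 15-byte DWORD-aligned memset of 0xAB uses rep;stos with Count = 3 (12 bytes), leaving BytesLeft = 3, which is finished below with an i16 store of 0xABAB at offset 12 and an i8 store of 0xAB at offset 14.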
4411 SDOperand Value; 4412 unsigned Val = ValC->getValue() & 255; 4413 unsigned Offset = I->getValue() - BytesLeft; 4414 SDOperand DstAddr = Op.getOperand(1); 4415 MVT::ValueType AddrVT = DstAddr.getValueType(); 4416 if (BytesLeft >= 4) { 4417 Val = (Val << 8) | Val; 4418 Val = (Val << 16) | Val; 4419 Value = DAG.getConstant(Val, MVT::i32); 4420 Chain = DAG.getStore(Chain, Value, 4421 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4422 DAG.getConstant(Offset, AddrVT)), 4423 NULL, 0); 4424 BytesLeft -= 4; 4425 Offset += 4; 4426 } 4427 if (BytesLeft >= 2) { 4428 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4429 Chain = DAG.getStore(Chain, Value, 4430 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4431 DAG.getConstant(Offset, AddrVT)), 4432 NULL, 0); 4433 BytesLeft -= 2; 4434 Offset += 2; 4435 } 4436 if (BytesLeft == 1) { 4437 Value = DAG.getConstant(Val, MVT::i8); 4438 Chain = DAG.getStore(Chain, Value, 4439 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4440 DAG.getConstant(Offset, AddrVT)), 4441 NULL, 0); 4442 } 4443 } 4444 4445 return Chain; 4446} 4447 4448SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) { 4449 SDOperand ChainOp = Op.getOperand(0); 4450 SDOperand DestOp = Op.getOperand(1); 4451 SDOperand SourceOp = Op.getOperand(2); 4452 SDOperand CountOp = Op.getOperand(3); 4453 SDOperand AlignOp = Op.getOperand(4); 4454 unsigned Align = (unsigned)cast<ConstantSDNode>(AlignOp)->getValue(); 4455 if (Align == 0) Align = 1; 4456 4457 // The libc version is likely to be faster for the following cases. It can 4458 // use the address value and runtime information about the CPU. 4459 // With glibc 2.6.1 on a Core 2, copying an array of 100M longs was 30% faster. 4460 4461 // If not DWORD aligned, call memcpy. 4462 if ((Align & 3) != 0) 4463 return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG); 4464 4465 // If size is unknown, call memcpy. 4466 ConstantSDNode *I = dyn_cast<ConstantSDNode>(CountOp); 4467 if (!I) 4468 return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG); 4469 4470 // If size is more than the threshold, call memcpy.
4471 unsigned Size = I->getValue(); 4472 if (Size > Subtarget->getMinRepStrSizeThreshold()) 4473 return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG); 4474 4475 return LowerMEMCPYInline(ChainOp, DestOp, SourceOp, Size, Align, DAG); 4476} 4477 4478SDOperand X86TargetLowering::LowerMEMCPYCall(SDOperand Chain, 4479 SDOperand Dest, 4480 SDOperand Source, 4481 SDOperand Count, 4482 SelectionDAG &DAG) { 4483 MVT::ValueType IntPtr = getPointerTy(); 4484 TargetLowering::ArgListTy Args; 4485 TargetLowering::ArgListEntry Entry; 4486 Entry.Ty = getTargetData()->getIntPtrType(); 4487 Entry.Node = Dest; Args.push_back(Entry); 4488 Entry.Node = Source; Args.push_back(Entry); 4489 Entry.Node = Count; Args.push_back(Entry); 4490 std::pair<SDOperand,SDOperand> CallResult = 4491 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 4492 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG); 4493 return CallResult.second; 4494} 4495 4496SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4497 SDOperand Dest, 4498 SDOperand Source, 4499 unsigned Size, 4500 unsigned Align, 4501 SelectionDAG &DAG) { 4502 MVT::ValueType AVT; 4503 unsigned BytesLeft = 0; 4504 switch (Align & 3) { 4505 case 2: // WORD aligned 4506 AVT = MVT::i16; 4507 break; 4508 case 0: // DWORD aligned 4509 AVT = MVT::i32; 4510 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4511 AVT = MVT::i64; 4512 break; 4513 default: // Byte aligned 4514 AVT = MVT::i8; 4515 break; 4516 } 4517 4518 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4519 SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy()); 4520 BytesLeft = Size % UBytes; 4521 4522 SDOperand InFlag(0, 0); 4523 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4524 Count, InFlag); 4525 InFlag = Chain.getValue(1); 4526 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4527 Dest, InFlag); 4528 InFlag = Chain.getValue(1); 4529 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4530 Source, InFlag); 4531 InFlag = Chain.getValue(1); 4532 4533 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4534 SmallVector<SDOperand, 8> Ops; 4535 Ops.push_back(Chain); 4536 Ops.push_back(DAG.getValueType(AVT)); 4537 Ops.push_back(InFlag); 4538 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4539 4540 if (BytesLeft) { 4541 // Issue loads and stores for the last 1 - 7 bytes. 
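    // For example (illustrative): a 13-byte DWORD-aligned copy uses
    // rep;movs with Count = 3 (12 bytes) and BytesLeft = 1; the code below
    // then moves the final byte with an i8 load/store pair at offset 12.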
4542 unsigned Offset = Size - BytesLeft; 4543 SDOperand DstAddr = Dest; 4544 MVT::ValueType DstVT = DstAddr.getValueType(); 4545 SDOperand SrcAddr = Source; 4546 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4547 SDOperand Value; 4548 if (BytesLeft >= 4) { 4549 Value = DAG.getLoad(MVT::i32, Chain, 4550 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4551 DAG.getConstant(Offset, SrcVT)), 4552 NULL, 0); 4553 Chain = Value.getValue(1); 4554 Chain = DAG.getStore(Chain, Value, 4555 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4556 DAG.getConstant(Offset, DstVT)), 4557 NULL, 0); 4558 BytesLeft -= 4; 4559 Offset += 4; 4560 } 4561 if (BytesLeft >= 2) { 4562 Value = DAG.getLoad(MVT::i16, Chain, 4563 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4564 DAG.getConstant(Offset, SrcVT)), 4565 NULL, 0); 4566 Chain = Value.getValue(1); 4567 Chain = DAG.getStore(Chain, Value, 4568 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4569 DAG.getConstant(Offset, DstVT)), 4570 NULL, 0); 4571 BytesLeft -= 2; 4572 Offset += 2; 4573 } 4574 4575 if (BytesLeft == 1) { 4576 Value = DAG.getLoad(MVT::i8, Chain, 4577 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4578 DAG.getConstant(Offset, SrcVT)), 4579 NULL, 0); 4580 Chain = Value.getValue(1); 4581 Chain = DAG.getStore(Chain, Value, 4582 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4583 DAG.getConstant(Offset, DstVT)), 4584 NULL, 0); 4585 } 4586 } 4587 4588 return Chain; 4589} 4590 4591SDOperand 4592X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) { 4593 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4594 SDOperand TheOp = Op.getOperand(0); 4595 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1); 4596 if (Subtarget->is64Bit()) { 4597 SDOperand Copy1 = 4598 DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4599 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX, 4600 MVT::i64, Copy1.getValue(2)); 4601 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2, 4602 DAG.getConstant(32, MVT::i8)); 4603 SDOperand Ops[] = { 4604 DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp), Copy2.getValue(1) 4605 }; 4606 4607 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4608 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); 4609 } 4610 4611 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4612 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX, 4613 MVT::i32, Copy1.getValue(2)); 4614 SDOperand Ops[] = { Copy1, Copy2, Copy2.getValue(1) }; 4615 Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4616 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 3); 4617} 4618 4619SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4620 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 4621 4622 if (!Subtarget->is64Bit()) { 4623 // vastart just stores the address of the VarArgsFrameIndex slot into the 4624 // memory location argument. 4625 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4626 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 4627 SV->getOffset()); 4628 } 4629 4630 // __va_list_tag: 4631 // gp_offset (0 - 6 * 8) 4632 // fp_offset (48 - 48 + 8 * 16) 4633 // overflow_arg_area (point to parameters coming in memory). 
4634 // reg_save_area 4635 SmallVector<SDOperand, 8> MemOps; 4636 SDOperand FIN = Op.getOperand(1); 4637 // Store gp_offset 4638 SDOperand Store = DAG.getStore(Op.getOperand(0), 4639 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4640 FIN, SV->getValue(), SV->getOffset()); 4641 MemOps.push_back(Store); 4642 4643 // Store fp_offset 4644 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4645 DAG.getConstant(4, getPointerTy())); 4646 Store = DAG.getStore(Op.getOperand(0), 4647 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4648 FIN, SV->getValue(), SV->getOffset()); 4649 MemOps.push_back(Store); 4650 4651 // Store ptr to overflow_arg_area 4652 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4653 DAG.getConstant(4, getPointerTy())); 4654 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4655 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 4656 SV->getOffset()); 4657 MemOps.push_back(Store); 4658 4659 // Store ptr to reg_save_area. 4660 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 4661 DAG.getConstant(8, getPointerTy())); 4662 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4663 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 4664 SV->getOffset()); 4665 MemOps.push_back(Store); 4666 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4667} 4668 4669SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4670 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4671 SDOperand Chain = Op.getOperand(0); 4672 SDOperand DstPtr = Op.getOperand(1); 4673 SDOperand SrcPtr = Op.getOperand(2); 4674 SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3)); 4675 SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4676 4677 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, 4678 SrcSV->getValue(), SrcSV->getOffset()); 4679 Chain = SrcPtr.getValue(1); 4680 for (unsigned i = 0; i < 3; ++i) { 4681 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, 4682 SrcSV->getValue(), SrcSV->getOffset()); 4683 Chain = Val.getValue(1); 4684 Chain = DAG.getStore(Chain, Val, DstPtr, 4685 DstSV->getValue(), DstSV->getOffset()); 4686 if (i == 2) 4687 break; 4688 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4689 DAG.getConstant(8, getPointerTy())); 4690 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4691 DAG.getConstant(8, getPointerTy())); 4692 } 4693 return Chain; 4694} 4695 4696SDOperand 4697X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4698 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4699 switch (IntNo) { 4700 default: return SDOperand(); // Don't custom lower most intrinsics. 4701 // Comparison intrinsics. 
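  // Each (u)comi intrinsic below is lowered to an X86ISD::COMI or
  // X86ISD::UCOMI compare of its two operands followed by an X86ISD::SETCC
  // on the resulting flags; e.g. x86_sse_comieq_ss becomes COMI +
  // SETCC(SETEQ), any-extended from i8 to the intrinsic's i32 result.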
4702 case Intrinsic::x86_sse_comieq_ss: 4703 case Intrinsic::x86_sse_comilt_ss: 4704 case Intrinsic::x86_sse_comile_ss: 4705 case Intrinsic::x86_sse_comigt_ss: 4706 case Intrinsic::x86_sse_comige_ss: 4707 case Intrinsic::x86_sse_comineq_ss: 4708 case Intrinsic::x86_sse_ucomieq_ss: 4709 case Intrinsic::x86_sse_ucomilt_ss: 4710 case Intrinsic::x86_sse_ucomile_ss: 4711 case Intrinsic::x86_sse_ucomigt_ss: 4712 case Intrinsic::x86_sse_ucomige_ss: 4713 case Intrinsic::x86_sse_ucomineq_ss: 4714 case Intrinsic::x86_sse2_comieq_sd: 4715 case Intrinsic::x86_sse2_comilt_sd: 4716 case Intrinsic::x86_sse2_comile_sd: 4717 case Intrinsic::x86_sse2_comigt_sd: 4718 case Intrinsic::x86_sse2_comige_sd: 4719 case Intrinsic::x86_sse2_comineq_sd: 4720 case Intrinsic::x86_sse2_ucomieq_sd: 4721 case Intrinsic::x86_sse2_ucomilt_sd: 4722 case Intrinsic::x86_sse2_ucomile_sd: 4723 case Intrinsic::x86_sse2_ucomigt_sd: 4724 case Intrinsic::x86_sse2_ucomige_sd: 4725 case Intrinsic::x86_sse2_ucomineq_sd: { 4726 unsigned Opc = 0; 4727 ISD::CondCode CC = ISD::SETCC_INVALID; 4728 switch (IntNo) { 4729 default: break; 4730 case Intrinsic::x86_sse_comieq_ss: 4731 case Intrinsic::x86_sse2_comieq_sd: 4732 Opc = X86ISD::COMI; 4733 CC = ISD::SETEQ; 4734 break; 4735 case Intrinsic::x86_sse_comilt_ss: 4736 case Intrinsic::x86_sse2_comilt_sd: 4737 Opc = X86ISD::COMI; 4738 CC = ISD::SETLT; 4739 break; 4740 case Intrinsic::x86_sse_comile_ss: 4741 case Intrinsic::x86_sse2_comile_sd: 4742 Opc = X86ISD::COMI; 4743 CC = ISD::SETLE; 4744 break; 4745 case Intrinsic::x86_sse_comigt_ss: 4746 case Intrinsic::x86_sse2_comigt_sd: 4747 Opc = X86ISD::COMI; 4748 CC = ISD::SETGT; 4749 break; 4750 case Intrinsic::x86_sse_comige_ss: 4751 case Intrinsic::x86_sse2_comige_sd: 4752 Opc = X86ISD::COMI; 4753 CC = ISD::SETGE; 4754 break; 4755 case Intrinsic::x86_sse_comineq_ss: 4756 case Intrinsic::x86_sse2_comineq_sd: 4757 Opc = X86ISD::COMI; 4758 CC = ISD::SETNE; 4759 break; 4760 case Intrinsic::x86_sse_ucomieq_ss: 4761 case Intrinsic::x86_sse2_ucomieq_sd: 4762 Opc = X86ISD::UCOMI; 4763 CC = ISD::SETEQ; 4764 break; 4765 case Intrinsic::x86_sse_ucomilt_ss: 4766 case Intrinsic::x86_sse2_ucomilt_sd: 4767 Opc = X86ISD::UCOMI; 4768 CC = ISD::SETLT; 4769 break; 4770 case Intrinsic::x86_sse_ucomile_ss: 4771 case Intrinsic::x86_sse2_ucomile_sd: 4772 Opc = X86ISD::UCOMI; 4773 CC = ISD::SETLE; 4774 break; 4775 case Intrinsic::x86_sse_ucomigt_ss: 4776 case Intrinsic::x86_sse2_ucomigt_sd: 4777 Opc = X86ISD::UCOMI; 4778 CC = ISD::SETGT; 4779 break; 4780 case Intrinsic::x86_sse_ucomige_ss: 4781 case Intrinsic::x86_sse2_ucomige_sd: 4782 Opc = X86ISD::UCOMI; 4783 CC = ISD::SETGE; 4784 break; 4785 case Intrinsic::x86_sse_ucomineq_ss: 4786 case Intrinsic::x86_sse2_ucomineq_sd: 4787 Opc = X86ISD::UCOMI; 4788 CC = ISD::SETNE; 4789 break; 4790 } 4791 4792 unsigned X86CC; 4793 SDOperand LHS = Op.getOperand(1); 4794 SDOperand RHS = Op.getOperand(2); 4795 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4796 4797 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 4798 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 4799 DAG.getConstant(X86CC, MVT::i8), Cond); 4800 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4801 } 4802 } 4803} 4804 4805SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4806 // Depths > 0 not supported yet! 
4807 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4808 return SDOperand(); 4809 4810 // Just load the return address 4811 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4812 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4813} 4814 4815SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4816 // Depths > 0 not supported yet! 4817 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4818 return SDOperand(); 4819 4820 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4821 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4822 DAG.getConstant(4, getPointerTy())); 4823} 4824 4825SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 4826 SelectionDAG &DAG) { 4827 // Is not yet supported on x86-64 4828 if (Subtarget->is64Bit()) 4829 return SDOperand(); 4830 4831 return DAG.getConstant(8, getPointerTy()); 4832} 4833 4834SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 4835{ 4836 assert(!Subtarget->is64Bit() && 4837 "Lowering of eh_return builtin is not supported yet on x86-64"); 4838 4839 MachineFunction &MF = DAG.getMachineFunction(); 4840 SDOperand Chain = Op.getOperand(0); 4841 SDOperand Offset = Op.getOperand(1); 4842 SDOperand Handler = Op.getOperand(2); 4843 4844 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 4845 getPointerTy()); 4846 4847 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 4848 DAG.getConstant(-4UL, getPointerTy())); 4849 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 4850 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 4851 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 4852 MF.addLiveOut(X86::ECX); 4853 4854 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 4855 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 4856} 4857 4858SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 4859 SelectionDAG &DAG) { 4860 SDOperand Root = Op.getOperand(0); 4861 SDOperand Trmp = Op.getOperand(1); // trampoline 4862 SDOperand FPtr = Op.getOperand(2); // nested function 4863 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 4864 4865 SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4)); 4866 4867 if (Subtarget->is64Bit()) { 4868 return SDOperand(); // not yet supported 4869 } else { 4870 Function *Func = (Function *) 4871 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 4872 unsigned CC = Func->getCallingConv(); 4873 unsigned NestReg; 4874 4875 switch (CC) { 4876 default: 4877 assert(0 && "Unsupported calling convention"); 4878 case CallingConv::C: 4879 case CallingConv::X86_StdCall: { 4880 // Pass 'nest' parameter in ECX. 4881 // Must be kept in sync with X86CallingConv.td 4882 NestReg = X86::ECX; 4883 4884 // Check that ECX wasn't needed by an 'inreg' parameter. 4885 const FunctionType *FTy = Func->getFunctionType(); 4886 const ParamAttrsList *Attrs = FTy->getParamAttrs(); 4887 4888 if (Attrs && !Func->isVarArg()) { 4889 unsigned InRegCount = 0; 4890 unsigned Idx = 1; 4891 4892 for (FunctionType::param_iterator I = FTy->param_begin(), 4893 E = FTy->param_end(); I != E; ++I, ++Idx) 4894 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 4895 // FIXME: should only count parameters that are lowered to integers. 
4896          InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;
4897
4898      if (InRegCount > 2) {
4899        cerr << "Nest register in use - reduce number of inreg parameters!\n";
4900        abort();
4901      }
4902    }
4903    break;
4904  }
4905  case CallingConv::X86_FastCall:
4906    // Pass 'nest' parameter in EAX.
4907    // Must be kept in sync with X86CallingConv.td
4908    NestReg = X86::EAX;
4909    break;
4910  }
4911
4912  const X86InstrInfo *TII =
4913    ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
4914
4915  SDOperand OutChains[4];
4916  SDOperand Addr, Disp;
4917
4918  Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
4919  Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
4920
4921  unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
4922  unsigned char N86Reg = ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
4923  OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
4924                              Trmp, TrmpSV->getValue(), TrmpSV->getOffset());
4925
4926  Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
4927  OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
4928                              TrmpSV->getOffset() + 1, false, 1);
4929
4930  unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
4931  Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
4932  OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
4933                              TrmpSV->getValue(), TrmpSV->getOffset() + 5);
4934
4935  Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
4936  OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
4937                              TrmpSV->getOffset() + 6, false, 1);
4938
4939  SDOperand Ops[] =
4940    { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
4941  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
4942  }
4943}
4944
4945/// LowerOperation - Provide custom lowering hooks for some operations.
4946/// 4947SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4948 switch (Op.getOpcode()) { 4949 default: assert(0 && "Should not custom lower this!"); 4950 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4951 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4952 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4953 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4954 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4955 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4956 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4957 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4958 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4959 case ISD::SHL_PARTS: 4960 case ISD::SRA_PARTS: 4961 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4962 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4963 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4964 case ISD::FABS: return LowerFABS(Op, DAG); 4965 case ISD::FNEG: return LowerFNEG(Op, DAG); 4966 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4967 case ISD::SETCC: return LowerSETCC(Op, DAG); 4968 case ISD::SELECT: return LowerSELECT(Op, DAG); 4969 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4970 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4971 case ISD::CALL: return LowerCALL(Op, DAG); 4972 case ISD::RET: return LowerRET(Op, DAG); 4973 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4974 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4975 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4976 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4977 case ISD::VASTART: return LowerVASTART(Op, DAG); 4978 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 4979 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4980 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4981 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4982 case ISD::FRAME_TO_ARGS_OFFSET: 4983 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 4984 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 4985 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 4986 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 4987 } 4988 return SDOperand(); 4989} 4990 4991const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4992 switch (Opcode) { 4993 default: return NULL; 4994 case X86ISD::SHLD: return "X86ISD::SHLD"; 4995 case X86ISD::SHRD: return "X86ISD::SHRD"; 4996 case X86ISD::FAND: return "X86ISD::FAND"; 4997 case X86ISD::FOR: return "X86ISD::FOR"; 4998 case X86ISD::FXOR: return "X86ISD::FXOR"; 4999 case X86ISD::FSRL: return "X86ISD::FSRL"; 5000 case X86ISD::FILD: return "X86ISD::FILD"; 5001 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 5002 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 5003 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 5004 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 5005 case X86ISD::FLD: return "X86ISD::FLD"; 5006 case X86ISD::FST: return "X86ISD::FST"; 5007 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 5008 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 5009 case X86ISD::CALL: return "X86ISD::CALL"; 5010 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 5011 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 5012 case X86ISD::CMP: return "X86ISD::CMP"; 5013 case X86ISD::COMI: return 
"X86ISD::COMI"; 5014 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 5015 case X86ISD::SETCC: return "X86ISD::SETCC"; 5016 case X86ISD::CMOV: return "X86ISD::CMOV"; 5017 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 5018 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 5019 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 5020 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 5021 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 5022 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 5023 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 5024 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 5025 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 5026 case X86ISD::FMAX: return "X86ISD::FMAX"; 5027 case X86ISD::FMIN: return "X86ISD::FMIN"; 5028 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 5029 case X86ISD::FRCP: return "X86ISD::FRCP"; 5030 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 5031 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 5032 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 5033 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 5034 } 5035} 5036 5037// isLegalAddressingMode - Return true if the addressing mode represented 5038// by AM is legal for this target, for a load/store of the specified type. 5039bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 5040 const Type *Ty) const { 5041 // X86 supports extremely general addressing modes. 5042 5043 // X86 allows a sign-extended 32-bit immediate field as a displacement. 5044 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 5045 return false; 5046 5047 if (AM.BaseGV) { 5048 // We can only fold this if we don't need an extra load. 5049 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5050 return false; 5051 5052 // X86-64 only supports addr of globals in small code model. 5053 if (Subtarget->is64Bit()) { 5054 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5055 return false; 5056 // If lower 4G is not available, then we must use rip-relative addressing. 5057 if (AM.BaseOffs || AM.Scale > 1) 5058 return false; 5059 } 5060 } 5061 5062 switch (AM.Scale) { 5063 case 0: 5064 case 1: 5065 case 2: 5066 case 4: 5067 case 8: 5068 // These scales always work. 5069 break; 5070 case 3: 5071 case 5: 5072 case 9: 5073 // These scales are formed with basereg+scalereg. Only accept if there is 5074 // no basereg yet. 5075 if (AM.HasBaseReg) 5076 return false; 5077 break; 5078 default: // Other stuff never works. 5079 return false; 5080 } 5081 5082 return true; 5083} 5084 5085 5086/// isShuffleMaskLegal - Targets can use this to indicate that they only 5087/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5088/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5089/// are assumed to be legal. 5090bool 5091X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5092 // Only do shuffles on 128-bit vector types for now. 
5093 if (MVT::getSizeInBits(VT) == 64) return false; 5094 return (Mask.Val->getNumOperands() <= 4 || 5095 isIdentityMask(Mask.Val) || 5096 isIdentityMask(Mask.Val, true) || 5097 isSplatMask(Mask.Val) || 5098 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5099 X86::isUNPCKLMask(Mask.Val) || 5100 X86::isUNPCKHMask(Mask.Val) || 5101 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5102 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5103} 5104 5105bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5106 MVT::ValueType EVT, 5107 SelectionDAG &DAG) const { 5108 unsigned NumElts = BVOps.size(); 5109 // Only do shuffles on 128-bit vector types for now. 5110 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5111 if (NumElts == 2) return true; 5112 if (NumElts == 4) { 5113 return (isMOVLMask(&BVOps[0], 4) || 5114 isCommutedMOVL(&BVOps[0], 4, true) || 5115 isSHUFPMask(&BVOps[0], 4) || 5116 isCommutedSHUFP(&BVOps[0], 4)); 5117 } 5118 return false; 5119} 5120 5121//===----------------------------------------------------------------------===// 5122// X86 Scheduler Hooks 5123//===----------------------------------------------------------------------===// 5124 5125MachineBasicBlock * 5126X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 5127 MachineBasicBlock *BB) { 5128 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5129 switch (MI->getOpcode()) { 5130 default: assert(false && "Unexpected instr type to insert"); 5131 case X86::CMOV_FR32: 5132 case X86::CMOV_FR64: 5133 case X86::CMOV_V4F32: 5134 case X86::CMOV_V2F64: 5135 case X86::CMOV_V2I64: { 5136 // To "insert" a SELECT_CC instruction, we actually have to insert the 5137 // diamond control-flow pattern. The incoming instruction knows the 5138 // destination vreg to set, the condition code register to branch on, the 5139 // true/false values to select between, and a branch opcode to use. 5140 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5141 ilist<MachineBasicBlock>::iterator It = BB; 5142 ++It; 5143 5144 // thisMBB: 5145 // ... 5146 // TrueVal = ... 5147 // cmpTY ccX, r1, r2 5148 // bCC copy1MBB 5149 // fallthrough --> copy0MBB 5150 MachineBasicBlock *thisMBB = BB; 5151 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5152 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5153 unsigned Opc = 5154 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5155 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5156 MachineFunction *F = BB->getParent(); 5157 F->getBasicBlockList().insert(It, copy0MBB); 5158 F->getBasicBlockList().insert(It, sinkMBB); 5159 // Update machine-CFG edges by first adding all successors of the current 5160 // block to the new block which will contain the Phi node for the select. 5161 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5162 e = BB->succ_end(); i != e; ++i) 5163 sinkMBB->addSuccessor(*i); 5164 // Next, remove all successors of the current block, and add the true 5165 // and fallthrough blocks as its successors. 5166 while(!BB->succ_empty()) 5167 BB->removeSuccessor(BB->succ_begin()); 5168 BB->addSuccessor(copy0MBB); 5169 BB->addSuccessor(sinkMBB); 5170 5171 // copy0MBB: 5172 // %FalseValue = ... 5173 // # fallthrough to sinkMBB 5174 BB = copy0MBB; 5175 5176 // Update machine-CFG edges 5177 BB->addSuccessor(sinkMBB); 5178 5179 // sinkMBB: 5180 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5181 // ... 
5182 BB = sinkMBB; 5183 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5184 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5185 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5186 5187 delete MI; // The pseudo instruction is gone now. 5188 return BB; 5189 } 5190 5191 case X86::FP32_TO_INT16_IN_MEM: 5192 case X86::FP32_TO_INT32_IN_MEM: 5193 case X86::FP32_TO_INT64_IN_MEM: 5194 case X86::FP64_TO_INT16_IN_MEM: 5195 case X86::FP64_TO_INT32_IN_MEM: 5196 case X86::FP64_TO_INT64_IN_MEM: 5197 case X86::FP80_TO_INT16_IN_MEM: 5198 case X86::FP80_TO_INT32_IN_MEM: 5199 case X86::FP80_TO_INT64_IN_MEM: { 5200 // Change the floating point control register to use "round towards zero" 5201 // mode when truncating to an integer value. 5202 MachineFunction *F = BB->getParent(); 5203 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5204 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5205 5206 // Load the old value of the high byte of the control word... 5207 unsigned OldCW = 5208 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass); 5209 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5210 5211 // Set the high part to be round to zero... 5212 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5213 .addImm(0xC7F); 5214 5215 // Reload the modified control word now... 5216 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5217 5218 // Restore the memory image of control word to original value 5219 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5220 .addReg(OldCW); 5221 5222 // Get the X86 opcode to use. 5223 unsigned Opc; 5224 switch (MI->getOpcode()) { 5225 default: assert(0 && "illegal opcode!"); 5226 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5227 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5228 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5229 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5230 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5231 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5232 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5233 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5234 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5235 } 5236 5237 X86AddressMode AM; 5238 MachineOperand &Op = MI->getOperand(0); 5239 if (Op.isRegister()) { 5240 AM.BaseType = X86AddressMode::RegBase; 5241 AM.Base.Reg = Op.getReg(); 5242 } else { 5243 AM.BaseType = X86AddressMode::FrameIndexBase; 5244 AM.Base.FrameIndex = Op.getFrameIndex(); 5245 } 5246 Op = MI->getOperand(1); 5247 if (Op.isImmediate()) 5248 AM.Scale = Op.getImm(); 5249 Op = MI->getOperand(2); 5250 if (Op.isImmediate()) 5251 AM.IndexReg = Op.getImm(); 5252 Op = MI->getOperand(3); 5253 if (Op.isGlobalAddress()) { 5254 AM.GV = Op.getGlobal(); 5255 } else { 5256 AM.Disp = Op.getImm(); 5257 } 5258 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5259 .addReg(MI->getOperand(4).getReg()); 5260 5261 // Reload the original control word now. 5262 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5263 5264 delete MI; // The pseudo instruction is gone now. 
5265 return BB; 5266 } 5267 } 5268} 5269 5270//===----------------------------------------------------------------------===// 5271// X86 Optimization Hooks 5272//===----------------------------------------------------------------------===// 5273 5274void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5275 uint64_t Mask, 5276 uint64_t &KnownZero, 5277 uint64_t &KnownOne, 5278 const SelectionDAG &DAG, 5279 unsigned Depth) const { 5280 unsigned Opc = Op.getOpcode(); 5281 assert((Opc >= ISD::BUILTIN_OP_END || 5282 Opc == ISD::INTRINSIC_WO_CHAIN || 5283 Opc == ISD::INTRINSIC_W_CHAIN || 5284 Opc == ISD::INTRINSIC_VOID) && 5285 "Should use MaskedValueIsZero if you don't know whether Op" 5286 " is a target node!"); 5287 5288 KnownZero = KnownOne = 0; // Don't know anything. 5289 switch (Opc) { 5290 default: break; 5291 case X86ISD::SETCC: 5292 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5293 break; 5294 } 5295} 5296 5297/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5298/// element of the result of the vector shuffle. 5299static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5300 MVT::ValueType VT = N->getValueType(0); 5301 SDOperand PermMask = N->getOperand(2); 5302 unsigned NumElems = PermMask.getNumOperands(); 5303 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5304 i %= NumElems; 5305 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5306 return (i == 0) 5307 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5308 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5309 SDOperand Idx = PermMask.getOperand(i); 5310 if (Idx.getOpcode() == ISD::UNDEF) 5311 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5312 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5313 } 5314 return SDOperand(); 5315} 5316 5317/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5318/// node is a GlobalAddress + an offset. 5319static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5320 unsigned Opc = N->getOpcode(); 5321 if (Opc == X86ISD::Wrapper) { 5322 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5323 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5324 return true; 5325 } 5326 } else if (Opc == ISD::ADD) { 5327 SDOperand N1 = N->getOperand(0); 5328 SDOperand N2 = N->getOperand(1); 5329 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5330 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5331 if (V) { 5332 Offset += V->getSignExtended(); 5333 return true; 5334 } 5335 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5336 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5337 if (V) { 5338 Offset += V->getSignExtended(); 5339 return true; 5340 } 5341 } 5342 } 5343 return false; 5344} 5345 5346/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5347/// + Dist * Size. 
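/// For example (illustrative), with Size == 4 and Dist == 1 this accepts a
/// load of global address G+4 against a base load of G+0, or two frame
/// objects whose assigned offsets differ by exactly 4 bytes, provided both
/// loads hang off the same chain operand.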
5348static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
5349                              MachineFrameInfo *MFI) {
5350  if (N->getOperand(0).Val != Base->getOperand(0).Val)
5351    return false;
5352
5353  SDOperand Loc = N->getOperand(1);
5354  SDOperand BaseLoc = Base->getOperand(1);
5355  if (Loc.getOpcode() == ISD::FrameIndex) {
5356    if (BaseLoc.getOpcode() != ISD::FrameIndex)
5357      return false;
5358    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
5359    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
5360    int FS = MFI->getObjectSize(FI);
5361    int BFS = MFI->getObjectSize(BFI);
5362    if (FS != BFS || FS != Size) return false;
5363    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
5364  } else {
5365    GlobalValue *GV1 = NULL;
5366    GlobalValue *GV2 = NULL;
5367    int64_t Offset1 = 0;
5368    int64_t Offset2 = 0;
5369    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
5370    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
5371    if (isGA1 && isGA2 && GV1 == GV2)
5372      return Offset1 == (Offset2 + Dist*Size);
5373  }
5374
5375  return false;
5376}
5377
5378static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
5379                              const X86Subtarget *Subtarget) {
5380  GlobalValue *GV = NULL;
5381  int64_t Offset = 0;  // isGAPlusOffset only accumulates into Offset.
5382  if (isGAPlusOffset(Base, GV, Offset))
5383    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
5384  else {
5385    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
5386    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
5387    if (BFI < 0)
5388      // Fixed objects do not specify alignment; however, their offsets are known.
5389      return ((Subtarget->getStackAlignment() % 16) == 0 &&
5390              (MFI->getObjectOffset(BFI) % 16) == 0);
5391    else
5392      return MFI->getObjectAlignment(BFI) >= 16;
5393  }
5394  return false;
5395}
5396
5397
5398/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
5399/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
5400/// if the load addresses are consecutive, non-overlapping, and in the right
5401/// order.
5402static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
5403                                       const X86Subtarget *Subtarget) {
5404  MachineFunction &MF = DAG.getMachineFunction();
5405  MachineFrameInfo *MFI = MF.getFrameInfo();
5406  MVT::ValueType VT = N->getValueType(0);
5407  MVT::ValueType EVT = MVT::getVectorElementType(VT);
5408  SDOperand PermMask = N->getOperand(2);
5409  int NumElems = (int)PermMask.getNumOperands();
5410  SDNode *Base = NULL;
5411  for (int i = 0; i < NumElems; ++i) {
5412    SDOperand Idx = PermMask.getOperand(i);
5413    if (Idx.getOpcode() == ISD::UNDEF) {
5414      if (!Base) return SDOperand();
5415    } else {
5416      SDOperand Arg =
5417        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
5418      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
5419        return SDOperand();
5420      if (!Base)
5421        Base = Arg.Val;
5422      else if (!isConsecutiveLoad(Arg.Val, Base,
5423                                  i, MVT::getSizeInBits(EVT)/8,MFI))
5424        return SDOperand();
5425    }
5426  }
5427
5428  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
5429  LoadSDNode *LD = cast<LoadSDNode>(Base);
5430  if (isAlign16) {
5431    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
5432                       LD->getSrcValueOffset(), LD->isVolatile());
5433  } else {
5434    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
5435                       LD->getSrcValueOffset(), LD->isVolatile(),
5436                       LD->getAlignment());
5437  }
5438}
5439
5440/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
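/// In particular, (select (setcc olt X, Y), X, Y) on f32/f64 is turned into
/// X86ISD::FMIN and the mirrored pattern into X86ISD::FMAX; the non-strict
/// forms (le/ge and their unordered variants) are only combined when
/// UnsafeFPMath is enabled, since minss/maxss do not honor IEEE semantics
/// for NaNs and signed zeros.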
5441static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5442 const X86Subtarget *Subtarget) { 5443 SDOperand Cond = N->getOperand(0); 5444 5445 // If we have SSE[12] support, try to form min/max nodes. 5446 if (Subtarget->hasSSE2() && 5447 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5448 if (Cond.getOpcode() == ISD::SETCC) { 5449 // Get the LHS/RHS of the select. 5450 SDOperand LHS = N->getOperand(1); 5451 SDOperand RHS = N->getOperand(2); 5452 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5453 5454 unsigned Opcode = 0; 5455 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5456 switch (CC) { 5457 default: break; 5458 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5459 case ISD::SETULE: 5460 case ISD::SETLE: 5461 if (!UnsafeFPMath) break; 5462 // FALL THROUGH. 5463 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5464 case ISD::SETLT: 5465 Opcode = X86ISD::FMIN; 5466 break; 5467 5468 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5469 case ISD::SETUGT: 5470 case ISD::SETGT: 5471 if (!UnsafeFPMath) break; 5472 // FALL THROUGH. 5473 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5474 case ISD::SETGE: 5475 Opcode = X86ISD::FMAX; 5476 break; 5477 } 5478 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5479 switch (CC) { 5480 default: break; 5481 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5482 case ISD::SETUGT: 5483 case ISD::SETGT: 5484 if (!UnsafeFPMath) break; 5485 // FALL THROUGH. 5486 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5487 case ISD::SETGE: 5488 Opcode = X86ISD::FMIN; 5489 break; 5490 5491 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5492 case ISD::SETULE: 5493 case ISD::SETLE: 5494 if (!UnsafeFPMath) break; 5495 // FALL THROUGH. 5496 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 5497 case ISD::SETLT: 5498 Opcode = X86ISD::FMAX; 5499 break; 5500 } 5501 } 5502 5503 if (Opcode) 5504 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5505 } 5506 5507 } 5508 5509 return SDOperand(); 5510} 5511 5512 5513SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5514 DAGCombinerInfo &DCI) const { 5515 SelectionDAG &DAG = DCI.DAG; 5516 switch (N->getOpcode()) { 5517 default: break; 5518 case ISD::VECTOR_SHUFFLE: 5519 return PerformShuffleCombine(N, DAG, Subtarget); 5520 case ISD::SELECT: 5521 return PerformSELECTCombine(N, DAG, Subtarget); 5522 } 5523 5524 return SDOperand(); 5525} 5526 5527//===----------------------------------------------------------------------===// 5528// X86 Inline Assembly Support 5529//===----------------------------------------------------------------------===// 5530 5531/// getConstraintType - Given a constraint letter, return the type of 5532/// constraint it is for this target. 5533X86TargetLowering::ConstraintType 5534X86TargetLowering::getConstraintType(const std::string &Constraint) const { 5535 if (Constraint.size() == 1) { 5536 switch (Constraint[0]) { 5537 case 'A': 5538 case 'r': 5539 case 'R': 5540 case 'l': 5541 case 'q': 5542 case 'Q': 5543 case 'x': 5544 case 'Y': 5545 return C_RegisterClass; 5546 default: 5547 break; 5548 } 5549 } 5550 return TargetLowering::getConstraintType(Constraint); 5551} 5552 5553/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5554/// vector. If it is invalid, don't add anything to Ops. 
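/// For example, the 'I' constraint only accepts immediates in [0, 31]
/// (e.g. 32-bit shift counts) and 'N' only immediates in [0, 255] (e.g.
/// in/out port numbers); 'i' also accepts a global address plus an optional
/// constant displacement when no extra load is needed to materialize the
/// address.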
5555void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
5556                                                     char Constraint,
5557                                                     std::vector<SDOperand>&Ops,
5558                                                     SelectionDAG &DAG) {
5559  SDOperand Result(0, 0);
5560
5561  switch (Constraint) {
5562  default: break;
5563  case 'I':
5564    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
5565      if (C->getValue() <= 31) {
5566        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5567        break;
5568      }
5569    }
5570    return;
5571  case 'N':
5572    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
5573      if (C->getValue() <= 255) {
5574        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
5575        break;
5576      }
5577    }
5578    return;
5579  case 'i': {
5580    // Literal immediates are always ok.
5581    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
5582      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
5583      break;
5584    }
5585
5586    // If we are in non-pic codegen mode, we allow the address of a global
5587    // (with an optional displacement) to be used with 'i'.
5588    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
5589    int64_t Offset = 0;
5590
5591    // Match either (GA), (GA+C) or (C+GA).
5592    if (GA) {
5593      Offset = GA->getOffset();
5594    } else if (Op.getOpcode() == ISD::ADD) {
5595      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5596      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
5597      if (C && GA) {
5598        Offset = GA->getOffset()+C->getValue();
5599      } else {
5600        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
5601        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
5602        if (C && GA)
5603          Offset = GA->getOffset()+C->getValue();
5604        else
5605          C = 0, GA = 0;
5606      }
5607    }
5608
5609    if (GA) {
5610      // If addressing this global requires a load (e.g. in PIC mode), we can't
5611      // match.
5612      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
5613                                         false))
5614        return;
5615
5616      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
5617                                      Offset);
5618      Result = Op;
5619      break;
5620    }
5621
5622    // Otherwise, not valid for this mode.
5623    return;
5624  }
5625  }
5626
5627  if (Result.Val) {
5628    Ops.push_back(Result);
5629    return;
5630  }
5631  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
5632}
5633
5634std::vector<unsigned> X86TargetLowering::
5635getRegClassForInlineAsmConstraint(const std::string &Constraint,
5636                                  MVT::ValueType VT) const {
5637  if (Constraint.size() == 1) {
5638    // FIXME: not handling fp-stack yet!
5639    switch (Constraint[0]) {      // GCC X86 Constraint Letters
5640    default: break;  // Unknown constraint letter
5641    case 'A':   // EAX/EDX
5642      if (VT == MVT::i32 || VT == MVT::i64)
5643        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
5644      break;
5645    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
5646    case 'Q':   // Q_REGS
5647      if (VT == MVT::i32)
5648        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
5649      else if (VT == MVT::i16)
5650        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
5651      else if (VT == MVT::i8)
5652        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
5653      break;
5654    }
5655  }
5656
5657  return std::vector<unsigned>();
5658}
5659
5660std::pair<unsigned, const TargetRegisterClass*>
5661X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
5662                                                MVT::ValueType VT) const {
5663  // First, see if this is a constraint that directly corresponds to an LLVM
5664  // register class.
5665 if (Constraint.size() == 1) { 5666 // GCC Constraint Letters 5667 switch (Constraint[0]) { 5668 default: break; 5669 case 'r': // GENERAL_REGS 5670 case 'R': // LEGACY_REGS 5671 case 'l': // INDEX_REGS 5672 if (VT == MVT::i64 && Subtarget->is64Bit()) 5673 return std::make_pair(0U, X86::GR64RegisterClass); 5674 if (VT == MVT::i32) 5675 return std::make_pair(0U, X86::GR32RegisterClass); 5676 else if (VT == MVT::i16) 5677 return std::make_pair(0U, X86::GR16RegisterClass); 5678 else if (VT == MVT::i8) 5679 return std::make_pair(0U, X86::GR8RegisterClass); 5680 break; 5681 case 'y': // MMX_REGS if MMX allowed. 5682 if (!Subtarget->hasMMX()) break; 5683 return std::make_pair(0U, X86::VR64RegisterClass); 5684 break; 5685 case 'Y': // SSE_REGS if SSE2 allowed 5686 if (!Subtarget->hasSSE2()) break; 5687 // FALL THROUGH. 5688 case 'x': // SSE_REGS if SSE1 allowed 5689 if (!Subtarget->hasSSE1()) break; 5690 5691 switch (VT) { 5692 default: break; 5693 // Scalar SSE types. 5694 case MVT::f32: 5695 case MVT::i32: 5696 return std::make_pair(0U, X86::FR32RegisterClass); 5697 case MVT::f64: 5698 case MVT::i64: 5699 return std::make_pair(0U, X86::FR64RegisterClass); 5700 // Vector types. 5701 case MVT::v16i8: 5702 case MVT::v8i16: 5703 case MVT::v4i32: 5704 case MVT::v2i64: 5705 case MVT::v4f32: 5706 case MVT::v2f64: 5707 return std::make_pair(0U, X86::VR128RegisterClass); 5708 } 5709 break; 5710 } 5711 } 5712 5713 // Use the default implementation in TargetLowering to convert the register 5714 // constraint into a member of a register class. 5715 std::pair<unsigned, const TargetRegisterClass*> Res; 5716 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5717 5718 // Not found as a standard register? 5719 if (Res.second == 0) { 5720 // GCC calls "st(0)" just plain "st". 5721 if (StringsEqualNoCase("{st}", Constraint)) { 5722 Res.first = X86::ST0; 5723 Res.second = X86::RFP80RegisterClass; 5724 } 5725 5726 return Res; 5727 } 5728 5729 // Otherwise, check to see if this is a register class of the wrong value 5730 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 5731 // turn into {ax},{dx}. 5732 if (Res.second->hasType(VT)) 5733 return Res; // Correct type already, nothing to do. 5734 5735 // All of the single-register GCC register classes map their values onto 5736 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 5737 // really want an 8-bit or 32-bit register, map to the appropriate register 5738 // class and return the appropriate register. 
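  // For example (illustrative): the constraint "{ax}" with VT == MVT::i32 is
  // remapped below to EAX in GR32, and with VT == MVT::i64 to RAX in GR64.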
5739  if (Res.second != X86::GR16RegisterClass)
5740    return Res;
5741
5742  if (VT == MVT::i8) {
5743    unsigned DestReg = 0;
5744    switch (Res.first) {
5745    default: break;
5746    case X86::AX: DestReg = X86::AL; break;
5747    case X86::DX: DestReg = X86::DL; break;
5748    case X86::CX: DestReg = X86::CL; break;
5749    case X86::BX: DestReg = X86::BL; break;
5750    }
5751    if (DestReg) {
5752      Res.first = DestReg;
5753      Res.second = X86::GR8RegisterClass;
5754    }
5755  } else if (VT == MVT::i32) {
5756    unsigned DestReg = 0;
5757    switch (Res.first) {
5758    default: break;
5759    case X86::AX: DestReg = X86::EAX; break;
5760    case X86::DX: DestReg = X86::EDX; break;
5761    case X86::CX: DestReg = X86::ECX; break;
5762    case X86::BX: DestReg = X86::EBX; break;
5763    case X86::SI: DestReg = X86::ESI; break;
5764    case X86::DI: DestReg = X86::EDI; break;
5765    case X86::BP: DestReg = X86::EBP; break;
5766    case X86::SP: DestReg = X86::ESP; break;
5767    }
5768    if (DestReg) {
5769      Res.first = DestReg;
5770      Res.second = X86::GR32RegisterClass;
5771    }
5772  } else if (VT == MVT::i64) {
5773    unsigned DestReg = 0;
5774    switch (Res.first) {
5775    default: break;
5776    case X86::AX: DestReg = X86::RAX; break;
5777    case X86::DX: DestReg = X86::RDX; break;
5778    case X86::CX: DestReg = X86::RCX; break;
5779    case X86::BX: DestReg = X86::RBX; break;
5780    case X86::SI: DestReg = X86::RSI; break;
5781    case X86::DI: DestReg = X86::RDI; break;
5782    case X86::BP: DestReg = X86::RBP; break;
5783    case X86::SP: DestReg = X86::RSP; break;
5784    }
5785    if (DestReg) {
5786      Res.first = DestReg;
5787      Res.second = X86::GR64RegisterClass;
5788    }
5789  }
5790
5791  return Res;
5792}
5793