X86ISelLowering.cpp revision cdd1eeca2c2da2e1c9b48e04f2f779ffe5cf3666
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  bool Fast = false;

  RegInfo = TM.getRegisterInfo();

  // Set up the TargetLowering object.

  // X86 is weird; it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
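  // (Marking these Expand rewrites a truncating store as an explicit TRUNCATE
  // followed by a plain store, which instruction selection can then match with
  // the ordinary narrow mov forms.)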
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf64)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
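  // Expanding BIT_CONVERT below makes i32<->f32 bitcasts go through a stack
  // slot: store in one register class, reload in the other.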
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
    setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  }

  // Scalar integer multiply, multiply-high, divide, and remainder are
  // lowered to use operations that produce two results, to match the
  // available instructions. This exposes the two-result form to trivial
  // CSE, which is able to combine x/y and x%y into a single instruction,
  // for example. The single-result multiply instructions are introduced
  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part
  // is not needed.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MULHS, MVT::i8, Expand);
  setOperationAction(ISD::MULHU, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);
  setOperationAction(ISD::MULHS, MVT::i16, Expand);
  setOperationAction(ISD::MULHU, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::MUL, MVT::i32, Expand);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTPOP, MVT::i8, Expand);
  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTPOP, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i8, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET, MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY, MVT::Other, Custom);

  // Use the default ISD::LOCATION, ISD::DECLARE expansion.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps

    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
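    // (Note that Fast is hard-coded to false earlier in this constructor, so
    // this block is currently never taken and the conversions are reported as
    // Legal until instruction selection.)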
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FREM, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    // SSE <-> X87 conversions go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    // Floating truncations go through memory. If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
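    // (x87 holds every value in an 80-bit stack register, so rounding to f32
    // or f64 is only observable after a store/reload through memory.)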
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
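  // Any operation still marked Expand after the per-subtarget overrides below
  // is broken apart (typically scalarized) by the legalizer.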
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetic

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  if (Subtarget->hasSSE41()) {
    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
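    // (These lower to the SSE4.1 pinsr/pextr family and insertps; the
    // insertps immediate additionally carries a zero mask.)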
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit())
    return getTargetData()->getABITypeAlignment(Ty);
  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
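/// With a GOT, the base is the global offset table itself; for other
/// non-RIP-relative PIC styles it is the PIC base register; RIP-relative code
/// can address the jump table entries relative to the table, so no extra base
/// is needed.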
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                      SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  if (!Subtarget->isPICStyleRIPRel())
    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
      return Chain.getOperand(0);
  }
  return Chain;
}

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }
  SDOperand Chain = Op.getOperand(0);

  // Handle tail call return.
  Chain = GetPossiblePreceedingTailCall(Chain);
  if (Chain.getOpcode() == X86ISD::TAILCALL) {
    SDOperand TailCall = Chain;
    SDOperand TargetAddress = TailCall.getOperand(1);
    SDOperand StackAdjustment = TailCall.getOperand(2);
    assert(((TargetAddress.getOpcode() == ISD::Register &&
             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
           "Expecting a global address, external symbol, or register");
    assert(StackAdjustment.getOpcode() == ISD::Constant &&
           "Expecting a const value");

    SmallVector<SDOperand, 8> Operands;
    Operands.push_back(Chain.getOperand(0));
    Operands.push_back(TargetAddress);
    Operands.push_back(StackAdjustment);
    // Copy registers used by the call. Last operand is a flag so it is not
    // copied.
    for (unsigned i = 3; i < TailCall.getNumOperands()-1; i++) {
      Operands.push_back(Chain.getOperand(i));
    }
    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
                       Operands.size());
  }

  // Regular return.
  SDOperand Flag;

  // Copy the result values into the output registers.
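  // The common case below emits one CopyToReg per returned value; a value
  // returned in ST0 takes the FP-stack path in the else branch instead.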
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't really
    // a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack. Do this with an FP_EXTEND to f80.
    // This will get legalized into a load/store if it can't get optimized away.
    if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT()))
      Value = DAG.getNode(ISD::FP_EXTEND, MVT::f80, Value);

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT. If this will end up
    // in an SSE register, copy it out as F80 and do a truncate, otherwise use
    // the specified value type.
    MVT::ValueType GetResultTy = RVLocs[0].getValVT();
    if (isScalarFPTypeInSSEReg(GetResultTy))
      GetResultTy = MVT::f80;
    SDVTList Tys = DAG.getVTList(GetResultTy, MVT::Other, MVT::Flag);

    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we want the result in an SSE register, use an FP_TRUNCATE to get it
    // there.
    if (GetResultTy != RVLocs[0].getValVT())
      RetVal = DAG.getNode(ISD::FP_ROUND, RVLocs[0].getValVT(), RetVal,
                           // This truncation won't change the value.
                           DAG.getIntPtrConstant(1));

    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}

/// LowerCallResultToTwo64BitRegs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in two 64-bit registers,
/// e.g. XMM0 and XMM1. This simply stores the two values back into the
/// fixed stack slot allocated for StructRet.
SDNode *X86TargetLowering::
LowerCallResultToTwo64BitRegs(SDOperand Chain, SDOperand InFlag,
                              SDNode *TheCall, unsigned Reg1, unsigned Reg2,
                              MVT::ValueType VT, SelectionDAG &DAG) {
  SDOperand RetVal1 = DAG.getCopyFromReg(Chain, Reg1, VT, InFlag);
  Chain = RetVal1.getValue(1);
  InFlag = RetVal1.getValue(2);
  SDOperand RetVal2 = DAG.getCopyFromReg(Chain, Reg2, VT, InFlag);
  Chain = RetVal2.getValue(1);
  InFlag = RetVal2.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal1, FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
  Chain = DAG.getStore(Chain, RetVal2, FIN, NULL, 0);
  return Chain.Val;
}

/// LowerCallResultToTwoX87Regs - Lower the result values of an x86-64
/// ISD::CALL where the results are known to be in ST0 and ST1.
SDNode *X86TargetLowering::
LowerCallResultToTwoX87Regs(SDOperand Chain, SDOperand InFlag,
                            SDNode *TheCall, SelectionDAG &DAG) {
  SmallVector<SDOperand, 8> ResultVals;
  const MVT::ValueType VTs[] = { MVT::f80, MVT::f80, MVT::Other, MVT::Flag };
  SDVTList Tys = DAG.getVTList(VTs, 4);
  SDOperand Ops[] = { Chain, InFlag };
  SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT2, Tys, Ops, 2);
  Chain = RetVal.getValue(2);
  SDOperand FIN = TheCall->getOperand(5);
  Chain = DAG.getStore(Chain, RetVal.getValue(1), FIN, NULL, 0);
  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(16));
  Chain = DAG.getStore(Chain, RetVal, FIN, NULL, 0);
  return Chain.Val;
}

//===----------------------------------------------------------------------===//
//            C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention seems to be the standard for many Windows
//  API routines. It differs from the C calling convention just a little: the
//  callee should clean up the stack, not the caller. Symbols should also be
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention, see the Fast Calling Convention
//  (tail call) implementation, LowerX86_32FastCCCallTo.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live-in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

// Determines whether a CALL node uses struct return semantics.
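// (A CALL node's operands are laid out as (chain, cc, isvararg, istailcall,
// callee) followed by one (argument, flags) pair per argument, which is why
// the first argument's flags live at operand 6.)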
static bool CallIsStructReturn(SDOperand Op) {
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  if (!NumOps)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(6));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a FORMAL_ARGUMENTS node uses struct return semantics.
static bool ArgsAreStructReturn(SDOperand Op) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  if (!NumArgs)
    return false;

  ConstantSDNode *Flags = cast<ConstantSDNode>(Op.getOperand(3));
  return Flags->getValue() & ISD::ParamFlags::StructReturn;
}

// Determines whether a CALL or FORMAL_ARGUMENTS node requires the callee to pop
// its own arguments. Callee pop is necessary to support tail calls.
bool X86TargetLowering::IsCalleePop(SDOperand Op) {
  bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (IsVarArg)
    return false;

  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !Subtarget->is64Bit();
  case CallingConv::X86_FastCall:
    return !Subtarget->is64Bit();
  case CallingConv::Fast:
    return PerformTailCallOpt;
  }
}

// Selects the correct CCAssignFn for a CALL or FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();

  if (Subtarget->is64Bit()) {
    if (CC == CallingConv::Fast && PerformTailCallOpt)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast && PerformTailCallOpt)
    return CC_X86_32_TailCall;
  else
    return CC_X86_32_C;
}

// Selects the appropriate decoration to apply to a MachineFunction containing
// a given FORMAL_ARGUMENTS node.
NameDecorationStyle
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) {
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  if (CC == CallingConv::X86_FastCall)
    return FastCall;
  else if (CC == CallingConv::X86_StdCall)
    return StdCall;
  return None;
}


// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could possibly
// be overwritten when lowering the outgoing arguments in a tail call. Currently
// the implementation of this call is very conservative and assumes all
// arguments sourced from FORMAL_ARGUMENTS or a CopyFromReg with virtual
// registers would be overwritten by direct lowering.
// Possible improvement:
// Check the FORMAL_ARGUMENTS node's corresponding MERGE_VALUES for CopyFromReg
// nodes indicating inreg passed arguments which also need not be lowered to a
// safe stack slot.
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op) {
  RegisterSDNode *OpReg = NULL;
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode() == ISD::CopyFromReg &&
       (OpReg = cast<RegisterSDNode>(Op.getOperand(1))) &&
       OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister))
    return true;
  return false;
}

// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
// by "Src" to address "Dst" with size and alignment information specified by
// the specific parameter attribute.
// The copy will be passed as a byval function parameter.
static SDOperand
CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
                          unsigned Flags, SelectionDAG &DAG) {
  unsigned Align = 1 <<
    ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
  unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
    ISD::ParamFlags::ByValSizeOffs;
  SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
  SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
  SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
  return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
}

SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
                                              const CCValAssign &VA,
                                              MachineFrameInfo *MFI,
                                              SDOperand Root, unsigned i) {
  // Create the nodes corresponding to a load from this parameter slot.
  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
  bool isByVal = Flags & ISD::ParamFlags::ByVal;

  // FIXME: For now, all byval parameter objects are marked mutable. This
  // can be changed with more analysis.
  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                  VA.getLocMemOffset(), !isByVal);
  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
  if (isByVal)
    return FIN;
  return DAG.getLoad(VA.getValVT(), Root, FIN,
                     PseudoSourceValue::getFixedStack(), FI);
}

SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  // Decorate the function name.
  FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));

  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  bool Is64Bit = Subtarget->is64Bit();

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op));

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        if (Is64Bit && MVT::getSizeInBits(RegVT) == 64) {
          RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
          RegVT = MVT::i64;
        } else
          RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      // Handle MMX values passed in GPRs.
      if (Is64Bit && RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
          MVT::getSizeInBits(RegVT) == 64)
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());
      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align the stack specially for tail calls.
  if (CC == CallingConv::Fast)
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    if (Is64Bit || CC != CallingConv::X86_FastCall) {
      VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    }
    if (Is64Bit) {
      static const unsigned GPR64ArgRegs[] = {
        X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
      };
      static const unsigned XMMArgRegs[] = {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
      };

      unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
      unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so they
      // may be loaded by dereferencing the result of va_next.
      VarArgsGPOffset = NumIntRegs * 8;
      VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
      RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

      // Store the integer parameter registers.
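      // The register save area laid out above holds the 6 integer argument
      // registers (48 bytes) followed by the 8 XMM argument registers
      // (128 bytes), matching the offsets computed above.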
      SmallVector<SDOperand, 8> MemOps;
      SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
      SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                  DAG.getIntPtrConstant(VarArgsGPOffset));
      for (; NumIntRegs != 6; ++NumIntRegs) {
        unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                  X86::GR64RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(8));
      }

      // Now store the XMM (fp + vector) parameter registers.
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                        DAG.getIntPtrConstant(VarArgsFPOffset));
      for (; NumXMMRegs != 8; ++NumXMMRegs) {
        unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                  X86::VR128RegisterClass);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
        SDOperand Store =
          DAG.getStore(Val.getValue(1), Val, FIN,
                       PseudoSourceValue::getFixedStack(),
                       RegSaveFrameIndex);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getIntPtrConstant(16));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    }
  }

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are
  // aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (StackSize & 7) == 0)
    StackSize += 4;

  ArgValues.push_back(Root);

  // Some CCs need callee pop.
  if (IsCalleePop(Op)) {
    BytesToPopOnReturn = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0; // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && ArgsAreStructReturn(Op))
      BytesToPopOnReturn = 4;
    BytesCallerReserves = StackSize;
  }

  if (!Is64Bit) {
    RegSaveFrameIndex = 0xAAAAAAA;   // RegSaveFrameIndex is X86-64 only.
    if (CC == CallingConv::X86_FastCall)
      VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  }

  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
                                    const SDOperand &StackPtr,
                                    const CCValAssign &VA,
                                    SDOperand Chain,
                                    SDOperand Arg) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
  unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
  if (Flags & ISD::ParamFlags::ByVal) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
  }
  return DAG.getStore(Chain, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

/// ClassifyX86_64SRetCallReturn - Classify how to implement an x86-64
/// struct return call to the specified function. The x86-64 ABI specifies
/// that some SRet calls are actually returned in registers. Since current
/// LLVM cannot represent multi-value calls, they are represented as
/// calls where the results are passed in a hidden struct provided by
/// the caller. This function examines the type of the struct to
/// determine the correct way to implement the call.
X86::X86_64SRet
X86TargetLowering::ClassifyX86_64SRetCallReturn(const Function *Fn) {
  // FIXME: Disabled for now.
  return X86::InMemory;

  const PointerType *PTy = cast<PointerType>(Fn->arg_begin()->getType());
  const Type *RTy = PTy->getElementType();
  unsigned Size = getTargetData()->getABITypeSize(RTy);
  if (Size != 16 && Size != 32)
    return X86::InMemory;

  if (Size == 32) {
    const StructType *STy = dyn_cast<StructType>(RTy);
    if (!STy) return X86::InMemory;
    if (STy->getNumElements() == 2 &&
        STy->getElementType(0) == Type::X86_FP80Ty &&
        STy->getElementType(1) == Type::X86_FP80Ty)
      return X86::InX87;
  }

  bool AllFP = true;
  for (Type::subtype_iterator I = RTy->subtype_begin(), E = RTy->subtype_end();
       I != E; ++I) {
    const Type *STy = I->get();
    if (!STy->isFPOrFPVector()) {
      AllFP = false;
      break;
    }
  }

  if (AllFP)
    return X86::InSSE;
  return X86::InGPR64;
}

void X86TargetLowering::X86_64AnalyzeSRetCallOperands(SDNode *TheCall,
                                                      CCAssignFn *Fn,
                                                      CCState &CCInfo) {
  unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
  for (unsigned i = 1; i != NumOps; ++i) {
    MVT::ValueType ArgVT = TheCall->getOperand(5+2*i).getValueType();
    SDOperand FlagOp = TheCall->getOperand(5+2*i+1);
    unsigned ArgFlags = cast<ConstantSDNode>(FlagOp)->getValue();
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) {
      cerr << "Call operand #" << i << " has unhandled type "
           << MVT::getValueTypeString(ArgVT) << "\n";
      abort();
    }
  }
}

SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDOperand Chain = Op.getOperand(0);
  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0
                    && CC == CallingConv::Fast && PerformTailCallOpt;
  SDOperand Callee = Op.getOperand(4);
  bool Is64Bit = Subtarget->is64Bit();
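  // Sketch of the ISD::CALL operand layout assumed throughout this function
  // (added commentary, reconstructed from the accesses above and below):
  //   operand 0     : chain
  //   operand 1     : calling convention (constant)
  //   operand 2     : isVarArg flag (constant)
  //   operand 3     : isTailCall flag (constant)
  //   operand 4     : callee
  //   operand 5+2*i : value of argument i
  //   operand 6+2*i : parameter flags of argument i (constant)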
  bool IsStructRet = CallIsStructReturn(Op);

  assert(!(isVarArg && CC == CallingConv::Fast) &&
         "Var args not supported with calling convention fastcc");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
  CCAssignFn *CCFn = CCAssignFnForNode(Op);

  X86::X86_64SRet SRetMethod = X86::InMemory;
  if (Is64Bit && IsStructRet)
    // FIXME: We can't figure out the type of the sret structure for indirect
    // calls. We need to copy more information from CallSite to the ISD::CALL
    // node.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      SRetMethod =
        ClassifyX86_64SRetCallReturn(dyn_cast<Function>(G->getGlobal()));

  // UGLY HACK! For x86-64, some 128-bit aggregates are returned in a pair of
  // registers. Unfortunately, llvm does not support i128 yet so we pretend
  // it's a sret call.
  if (SRetMethod != X86::InMemory)
    X86_64AnalyzeSRetCallOperands(Op.Val, CCFn, CCInfo);
  else
    CCInfo.AnalyzeCallOperands(Op.Val, CCFn);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (CC == CallingConv::Fast)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if (!Is64Bit && CC == CallingConv::X86_FastCall &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows() &&
      (NumBytes & 7) == 0)
    NumBytes += 4;

  int FPDiff = 0;
  if (IsTailCall) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed =
      MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot.
    // But only set if delta is greater than previous delta.
    if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
      MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));

  SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
  if (IsTailCall) {
    // Adjust the return address stack slot.
    if (FPDiff) {
      MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
      RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
      // Load the "old" return address.
      RetAddrFrIdx = DAG.getLoad(VT, Chain, RetAddrFrIdx, NULL, 0);
      // Calculate the new stack slot for the return address.
      int SlotSize = Is64Bit ? 8 : 4;
      int NewReturnAddrFI =
        MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
      NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
      Chain = SDOperand(RetAddrFrIdx.Val, 1);
    }
  }

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads. For tail
  // calls, arguments that might otherwise be overwritten are lowered to the
  // stack slots where they would go on a normal function call.
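  // (Added note: for tail calls this first pass only spills arguments that
  // might be clobbered, as tested by IsPossiblyOverwrittenArgumentOfTailCall
  // below; the second pass in the IsTailCall block further down then moves
  // every memory argument into its final, FPDiff-adjusted stack slot.)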
1476 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1477 CCValAssign &VA = ArgLocs[i]; 1478 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1479 1480 // Promote the value if needed. 1481 switch (VA.getLocInfo()) { 1482 default: assert(0 && "Unknown loc info!"); 1483 case CCValAssign::Full: break; 1484 case CCValAssign::SExt: 1485 Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg); 1486 break; 1487 case CCValAssign::ZExt: 1488 Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg); 1489 break; 1490 case CCValAssign::AExt: 1491 Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg); 1492 break; 1493 } 1494 1495 if (VA.isRegLoc()) { 1496 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1497 } else { 1498 if (!IsTailCall || IsPossiblyOverwrittenArgumentOfTailCall(Arg)) { 1499 assert(VA.isMemLoc()); 1500 if (StackPtr.Val == 0) 1501 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1502 1503 MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain, 1504 Arg)); 1505 } 1506 } 1507 } 1508 1509 if (!MemOpChains.empty()) 1510 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1511 &MemOpChains[0], MemOpChains.size()); 1512 1513 // Build a sequence of copy-to-reg nodes chained together with token chain 1514 // and flag operands which copy the outgoing args into registers. 1515 SDOperand InFlag; 1516 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1517 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1518 InFlag); 1519 InFlag = Chain.getValue(1); 1520 } 1521 1522 if (IsTailCall) 1523 InFlag = SDOperand(); // ??? Isn't this nuking the preceding loop's output? 1524 1525 // ELF / PIC requires GOT in the EBX register before function calls via PLT 1526 // GOT pointer. 1527 // Does not work with tail call since ebx is not restored correctly by 1528 // tailcaller. TODO: at least for x86 - verify for x86-64 1529 if (!IsTailCall && !Is64Bit && 1530 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1531 Subtarget->isPICStyleGOT()) { 1532 Chain = DAG.getCopyToReg(Chain, X86::EBX, 1533 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 1534 InFlag); 1535 InFlag = Chain.getValue(1); 1536 } 1537 1538 if (Is64Bit && isVarArg) { 1539 // From AMD64 ABI document: 1540 // For calls that may call functions that use varargs or stdargs 1541 // (prototype-less calls or calls to functions containing ellipsis (...) in 1542 // the declaration) %al is used as hidden argument to specify the number 1543 // of SSE registers used. The contents of %al do not need to match exactly 1544 // the number of registers, but must be an ubound on the number of SSE 1545 // registers used and is in the range 0 - 8 inclusive. 1546 1547 // Count the number of XMM registers allocated. 1548 static const unsigned XMMArgRegs[] = { 1549 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1550 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1551 }; 1552 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 1553 1554 Chain = DAG.getCopyToReg(Chain, X86::AL, 1555 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 1556 InFlag = Chain.getValue(1); 1557 } 1558 1559 // For tail calls lower the arguments to the 'real' stack slot. 
1560 if (IsTailCall) { 1561 SmallVector<SDOperand, 8> MemOpChains2; 1562 SDOperand FIN; 1563 int FI = 0; 1564 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1565 CCValAssign &VA = ArgLocs[i]; 1566 if (!VA.isRegLoc()) { 1567 assert(VA.isMemLoc()); 1568 SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); 1569 SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); 1570 unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue(); 1571 // Create frame index. 1572 int32_t Offset = VA.getLocMemOffset()+FPDiff; 1573 uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8; 1574 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1575 FIN = DAG.getFrameIndex(FI, MVT::i32); 1576 SDOperand Source = Arg; 1577 if (IsPossiblyOverwrittenArgumentOfTailCall(Arg)) { 1578 // Copy from stack slots to stack slot of a tail called function. This 1579 // needs to be done because if we would lower the arguments directly 1580 // to their real stack slot we might end up overwriting each other. 1581 // Get source stack slot. 1582 Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 1583 if (StackPtr.Val == 0) 1584 StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); 1585 Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source); 1586 if ((Flags & ISD::ParamFlags::ByVal)==0) 1587 Source = DAG.getLoad(VA.getValVT(), Chain, Source, NULL, 0); 1588 } 1589 1590 if (Flags & ISD::ParamFlags::ByVal) { 1591 // Copy relative to framepointer. 1592 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain, 1593 Flags, DAG)); 1594 } else { 1595 // Store relative to framepointer. 1596 MemOpChains2.push_back( 1597 DAG.getStore(Chain, Source, FIN, 1598 PseudoSourceValue::getFixedStack(), FI)); 1599 } 1600 } 1601 } 1602 1603 if (!MemOpChains2.empty()) 1604 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1605 &MemOpChains2[0], MemOpChains2.size()); 1606 1607 // Store the return address to the appropriate stack slot. 1608 if (FPDiff) 1609 Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0); 1610 } 1611 1612 // If the callee is a GlobalAddress node (quite common, every direct call is) 1613 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 1614 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1615 // We should use extra load for direct calls to dllimported functions in 1616 // non-JIT mode. 1617 if ((IsTailCall || !Is64Bit || 1618 getTargetMachine().getCodeModel() != CodeModel::Large) 1619 && !Subtarget->GVRequiresExtraLoad(G->getGlobal(), 1620 getTargetMachine(), true)) 1621 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy()); 1622 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1623 if (IsTailCall || !Is64Bit || 1624 getTargetMachine().getCodeModel() != CodeModel::Large) 1625 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 1626 } else if (IsTailCall) { 1627 assert(Callee.getOpcode() == ISD::LOAD && 1628 "Function destination must be loaded into virtual register"); 1629 unsigned Opc = Is64Bit ? X86::R9 : X86::ECX; 1630 1631 Chain = DAG.getCopyToReg(Chain, 1632 DAG.getRegister(Opc, getPointerTy()) , 1633 Callee,InFlag); 1634 Callee = DAG.getRegister(Opc, getPointerTy()); 1635 // Add register as live out. 1636 DAG.getMachineFunction().getRegInfo().addLiveOut(Opc); 1637 } 1638 1639 // Returns a chain & a flag for retval copy to use. 
1640 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1641 SmallVector<SDOperand, 8> Ops; 1642 1643 if (IsTailCall) { 1644 Ops.push_back(Chain); 1645 Ops.push_back(DAG.getIntPtrConstant(NumBytes)); 1646 Ops.push_back(DAG.getIntPtrConstant(0)); 1647 if (InFlag.Val) 1648 Ops.push_back(InFlag); 1649 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size()); 1650 InFlag = Chain.getValue(1); 1651 1652 // Returns a chain & a flag for retval copy to use. 1653 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1654 Ops.clear(); 1655 } 1656 1657 Ops.push_back(Chain); 1658 Ops.push_back(Callee); 1659 1660 if (IsTailCall) 1661 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 1662 1663 // Add an implicit use GOT pointer in EBX. 1664 if (!IsTailCall && !Is64Bit && 1665 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1666 Subtarget->isPICStyleGOT()) 1667 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 1668 1669 // Add argument registers to the end of the list so that they are known live 1670 // into the call. 1671 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1672 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1673 RegsToPass[i].second.getValueType())); 1674 1675 if (InFlag.Val) 1676 Ops.push_back(InFlag); 1677 1678 if (IsTailCall) { 1679 assert(InFlag.Val && 1680 "Flag must be set. Depend on flag being set in LowerRET"); 1681 Chain = DAG.getNode(X86ISD::TAILCALL, 1682 Op.Val->getVTList(), &Ops[0], Ops.size()); 1683 1684 return SDOperand(Chain.Val, Op.ResNo); 1685 } 1686 1687 Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); 1688 InFlag = Chain.getValue(1); 1689 1690 // Create the CALLSEQ_END node. 1691 unsigned NumBytesForCalleeToPush; 1692 if (IsCalleePop(Op)) 1693 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 1694 else if (!Is64Bit && IsStructRet) 1695 // If this is is a call to a struct-return function, the callee 1696 // pops the hidden struct pointer, so we have to push it back. 1697 // This is common for Darwin/X86, Linux & Mingw32 targets. 1698 NumBytesForCalleeToPush = 4; 1699 else 1700 NumBytesForCalleeToPush = 0; // Callee pops nothing. 1701 1702 // Returns a flag for retval copy to use. 1703 Chain = DAG.getCALLSEQ_END(Chain, 1704 DAG.getIntPtrConstant(NumBytes), 1705 DAG.getIntPtrConstant(NumBytesForCalleeToPush), 1706 InFlag); 1707 InFlag = Chain.getValue(1); 1708 1709 // Handle result values, copying them out of physregs into vregs that we 1710 // return. 1711 switch (SRetMethod) { 1712 default: 1713 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 1714 case X86::InGPR64: 1715 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val, 1716 X86::RAX, X86::RDX, 1717 MVT::i64, DAG), Op.ResNo); 1718 case X86::InSSE: 1719 return SDOperand(LowerCallResultToTwo64BitRegs(Chain, InFlag, Op.Val, 1720 X86::XMM0, X86::XMM1, 1721 MVT::f64, DAG), Op.ResNo); 1722 case X86::InX87: 1723 return SDOperand(LowerCallResultToTwoX87Regs(Chain, InFlag, Op.Val, DAG), 1724 Op.ResNo); 1725 } 1726} 1727 1728 1729//===----------------------------------------------------------------------===// 1730// Fast Calling Convention (tail call) implementation 1731//===----------------------------------------------------------------------===// 1732 1733// Like std call, callee cleans arguments, convention except that ECX is 1734// reserved for storing the tail called function address. Only 2 registers are 1735// free for argument passing (inreg). 
// Tail call optimization is performed provided:
//  * tailcallopt is enabled
//  * caller/callee are fastcc
//  * elf/pic is disabled OR
//  * elf/pic enabled + callee is in module + callee has
//    visibility protected or hidden
// To keep the stack aligned according to the platform ABI the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
// for example.)
// If a tail called function callee has more arguments than the caller, the
// caller needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after the
// original RETADDR, but before the saved framepointer or the spilled registers
// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
// stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
/// 16 byte alignment requirement, so that the stack is aligned again once the
/// SlotSize-byte return address has been pushed. (Illustrative example: a
/// 20-byte argument area on x86-32, with StackAlignment 16 and SlotSize 4,
/// is rounded up to 28 = 16 + 12.)
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                        SelectionDAG& DAG) {
  if (PerformTailCallOpt) {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetMachine &TM = MF.getTarget();
    const TargetFrameInfo &TFI = *TM.getFrameInfo();
    unsigned StackAlignment = TFI.getStackAlignment();
    uint64_t AlignMask = StackAlignment - 1;
    int64_t Offset = StackSize;
    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
      // Number smaller than 12 so just add the difference.
      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
    } else {
      // Mask out lower bits, add stackalignment once plus the 12 bytes.
      Offset = ((~AlignMask) & Offset) + StackAlignment +
               (StackAlignment - SlotSize);
    }
    StackSize = Offset;
  }
  return StackSize;
}

/// IsEligibleForTailCallOptimization - Check to see whether the next
/// instruction following the call is a return. A function is eligible if
/// caller/callee calling conventions match, currently only fastcc supports
/// tail calls, and the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
                                                          SDOperand Ret,
                                                          SelectionDAG& DAG) const {
  if (!PerformTailCallOpt)
    return false;

  // Check whether the CALL node immediately precedes the RET node and whether
  // the return uses the result of the node or is a void return.
  unsigned NumOps = Ret.getNumOperands();
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned CallerCC = MF.getFunction()->getCallingConv();
    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
      SDOperand Callee = Call.getOperand(4);
      // On elf/pic %ebx needs to be livein.
      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
          !Subtarget->isPICStyleGOT())
        return true;

      // Can only do local tail calls with PIC.
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        return G->getGlobal()->hasHiddenVisibility()
            || G->getGlobal()->hasProtectedVisibility();
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Other Lowering Hooks
//===----------------------------------------------------------------------===//


SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    if (Subtarget->is64Bit())
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
    else
      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);

    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
/// needed.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
                           SelectionDAG &DAG) {
  X86CC = X86::COND_INVALID;
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_NS;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        X86CC = X86::COND_S;
        return true;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        X86CC = X86::COND_LE;
        return true;
      }
    }

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETGT:  X86CC = X86::COND_G;  break;
    case ISD::SETGE:  X86CC = X86::COND_GE; break;
    case ISD::SETLT:  X86CC = X86::COND_L;  break;
    case ISD::SETLE:  X86CC = X86::COND_LE; break;
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETULT: X86CC = X86::COND_B;  break;
    case ISD::SETUGT: X86CC = X86::COND_A;  break;
    case ISD::SETULE: X86CC = X86::COND_BE; break;
    case ISD::SETUGE: X86CC = X86::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //   ZF  PF  CF   op
    //    0 | 0 | 0 | X > Y
    //    0 | 0 | 1 | X < Y
    //    1 | 0 | 0 | X == Y
    //    1 | 1 | 1 | unordered
    bool Flip = false;
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86::COND_NE; break;
    case ISD::SETUO:  X86CC = X86::COND_P;  break;
    case ISD::SETO:   X86CC = X86::COND_NP; break;
    }
    if (Flip)
      std::swap(LHS, RHS);
  }

  return X86CC != X86::COND_INVALID;
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code. The current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the specified range
/// [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value is equal to the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;
  return cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
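/// For example (illustrative, added commentary): the 4-element mask
/// <3, 1, 2, 0> qualifies, since every index selects from the first vector;
/// a mask such as <0, 4, 1, 5> does not, because indices 4 and 5 refer to
/// the second vector.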
1965bool X86::isPSHUFDMask(SDNode *N) { 1966 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1967 1968 if (N->getNumOperands() != 2 && N->getNumOperands() != 4) 1969 return false; 1970 1971 // Check if the value doesn't reference the second vector. 1972 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1973 SDOperand Arg = N->getOperand(i); 1974 if (Arg.getOpcode() == ISD::UNDEF) continue; 1975 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1976 if (cast<ConstantSDNode>(Arg)->getValue() >= e) 1977 return false; 1978 } 1979 1980 return true; 1981} 1982 1983/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1984/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1985bool X86::isPSHUFHWMask(SDNode *N) { 1986 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1987 1988 if (N->getNumOperands() != 8) 1989 return false; 1990 1991 // Lower quadword copied in order. 1992 for (unsigned i = 0; i != 4; ++i) { 1993 SDOperand Arg = N->getOperand(i); 1994 if (Arg.getOpcode() == ISD::UNDEF) continue; 1995 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1996 if (cast<ConstantSDNode>(Arg)->getValue() != i) 1997 return false; 1998 } 1999 2000 // Upper quadword shuffled. 2001 for (unsigned i = 4; i != 8; ++i) { 2002 SDOperand Arg = N->getOperand(i); 2003 if (Arg.getOpcode() == ISD::UNDEF) continue; 2004 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2005 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2006 if (Val < 4 || Val > 7) 2007 return false; 2008 } 2009 2010 return true; 2011} 2012 2013/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 2014/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 2015bool X86::isPSHUFLWMask(SDNode *N) { 2016 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2017 2018 if (N->getNumOperands() != 8) 2019 return false; 2020 2021 // Upper quadword copied in order. 2022 for (unsigned i = 4; i != 8; ++i) 2023 if (!isUndefOrEqual(N->getOperand(i), i)) 2024 return false; 2025 2026 // Lower quadword shuffled. 2027 for (unsigned i = 0; i != 4; ++i) 2028 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 2029 return false; 2030 2031 return true; 2032} 2033 2034/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 2035/// specifies a shuffle of elements that is suitable for input to SHUFP*. 2036static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 2037 if (NumElems != 2 && NumElems != 4) return false; 2038 2039 unsigned Half = NumElems / 2; 2040 for (unsigned i = 0; i < Half; ++i) 2041 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 2042 return false; 2043 for (unsigned i = Half; i < NumElems; ++i) 2044 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 2045 return false; 2046 2047 return true; 2048} 2049 2050bool X86::isSHUFPMask(SDNode *N) { 2051 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2052 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 2053} 2054 2055/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 2056/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 2057/// half elements to come from vector 1 (which would equal the dest.) and 2058/// the upper half to come from vector 2. 
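/// For example (illustrative, added commentary): with 4 elements,
/// <4, 5, 0, 1> is a commuted SHUFP mask (low half taken from vector 2,
/// high half from vector 1), whereas isSHUFPMask above accepts the
/// uncommuted form <0, 1, 4, 5>.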
2059static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 2060 if (NumOps != 2 && NumOps != 4) return false; 2061 2062 unsigned Half = NumOps / 2; 2063 for (unsigned i = 0; i < Half; ++i) 2064 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 2065 return false; 2066 for (unsigned i = Half; i < NumOps; ++i) 2067 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 2068 return false; 2069 return true; 2070} 2071 2072static bool isCommutedSHUFP(SDNode *N) { 2073 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2074 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 2075} 2076 2077/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 2078/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 2079bool X86::isMOVHLPSMask(SDNode *N) { 2080 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2081 2082 if (N->getNumOperands() != 4) 2083 return false; 2084 2085 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 2086 return isUndefOrEqual(N->getOperand(0), 6) && 2087 isUndefOrEqual(N->getOperand(1), 7) && 2088 isUndefOrEqual(N->getOperand(2), 2) && 2089 isUndefOrEqual(N->getOperand(3), 3); 2090} 2091 2092/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 2093/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 2094/// <2, 3, 2, 3> 2095bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 2096 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2097 2098 if (N->getNumOperands() != 4) 2099 return false; 2100 2101 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 2102 return isUndefOrEqual(N->getOperand(0), 2) && 2103 isUndefOrEqual(N->getOperand(1), 3) && 2104 isUndefOrEqual(N->getOperand(2), 2) && 2105 isUndefOrEqual(N->getOperand(3), 3); 2106} 2107 2108/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 2109/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 2110bool X86::isMOVLPMask(SDNode *N) { 2111 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2112 2113 unsigned NumElems = N->getNumOperands(); 2114 if (NumElems != 2 && NumElems != 4) 2115 return false; 2116 2117 for (unsigned i = 0; i < NumElems/2; ++i) 2118 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 2119 return false; 2120 2121 for (unsigned i = NumElems/2; i < NumElems; ++i) 2122 if (!isUndefOrEqual(N->getOperand(i), i)) 2123 return false; 2124 2125 return true; 2126} 2127 2128/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 2129/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 2130/// and MOVLHPS. 2131bool X86::isMOVHPMask(SDNode *N) { 2132 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2133 2134 unsigned NumElems = N->getNumOperands(); 2135 if (NumElems != 2 && NumElems != 4) 2136 return false; 2137 2138 for (unsigned i = 0; i < NumElems/2; ++i) 2139 if (!isUndefOrEqual(N->getOperand(i), i)) 2140 return false; 2141 2142 for (unsigned i = 0; i < NumElems/2; ++i) { 2143 SDOperand Arg = N->getOperand(i + NumElems/2); 2144 if (!isUndefOrEqual(Arg, i + NumElems)) 2145 return false; 2146 } 2147 2148 return true; 2149} 2150 2151/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 2152/// specifies a shuffle of elements that is suitable for input to UNPCKL. 
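/// For example (illustrative, added commentary): the canonical 4-element
/// UNPCKL mask is <0, 4, 1, 5>, interleaving the low halves of the two
/// inputs; the corresponding UNPCKH mask checked below is <2, 6, 3, 7>.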
2153bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 2154 bool V2IsSplat = false) { 2155 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2156 return false; 2157 2158 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2159 SDOperand BitI = Elts[i]; 2160 SDOperand BitI1 = Elts[i+1]; 2161 if (!isUndefOrEqual(BitI, j)) 2162 return false; 2163 if (V2IsSplat) { 2164 if (isUndefOrEqual(BitI1, NumElts)) 2165 return false; 2166 } else { 2167 if (!isUndefOrEqual(BitI1, j + NumElts)) 2168 return false; 2169 } 2170 } 2171 2172 return true; 2173} 2174 2175bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 2176 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2177 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2178} 2179 2180/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 2181/// specifies a shuffle of elements that is suitable for input to UNPCKH. 2182bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 2183 bool V2IsSplat = false) { 2184 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 2185 return false; 2186 2187 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 2188 SDOperand BitI = Elts[i]; 2189 SDOperand BitI1 = Elts[i+1]; 2190 if (!isUndefOrEqual(BitI, j + NumElts/2)) 2191 return false; 2192 if (V2IsSplat) { 2193 if (isUndefOrEqual(BitI1, NumElts)) 2194 return false; 2195 } else { 2196 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 2197 return false; 2198 } 2199 } 2200 2201 return true; 2202} 2203 2204bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 2205 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2206 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 2207} 2208 2209/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 2210/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 2211/// <0, 0, 1, 1> 2212bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 2213 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2214 2215 unsigned NumElems = N->getNumOperands(); 2216 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2217 return false; 2218 2219 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 2220 SDOperand BitI = N->getOperand(i); 2221 SDOperand BitI1 = N->getOperand(i+1); 2222 2223 if (!isUndefOrEqual(BitI, j)) 2224 return false; 2225 if (!isUndefOrEqual(BitI1, j)) 2226 return false; 2227 } 2228 2229 return true; 2230} 2231 2232/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 2233/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 2234/// <2, 2, 3, 3> 2235bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 2236 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2237 2238 unsigned NumElems = N->getNumOperands(); 2239 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 2240 return false; 2241 2242 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 2243 SDOperand BitI = N->getOperand(i); 2244 SDOperand BitI1 = N->getOperand(i + 1); 2245 2246 if (!isUndefOrEqual(BitI, j)) 2247 return false; 2248 if (!isUndefOrEqual(BitI1, j)) 2249 return false; 2250 } 2251 2252 return true; 2253} 2254 2255/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 2256/// specifies a shuffle of elements that is suitable for input to MOVSS, 2257/// MOVSD, and MOVD, i.e. setting the lowest element. 
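/// For example (illustrative, added commentary): <4, 1, 2, 3> is a valid
/// 4-element MOVL mask; the low element comes from vector 2 and the
/// remaining elements pass through from vector 1 in order.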
static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
  if (NumElts != 2 && NumElts != 4)
    return false;

  if (!isUndefOrEqual(Elts[0], NumElts))
    return false;

  for (unsigned i = 1; i < NumElts; ++i) {
    if (!isUndefOrEqual(Elts[i], i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
}

/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants. X86 movss requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1
/// in order.
static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
                           bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumOps; ++i) {
    SDOperand Arg = Ops[i];
    if (!(isUndefOrEqual(Arg, i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
      return false;
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
                        V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
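/// For example (illustrative, added commentary): MOVSLDUP duplicates the
/// even elements, so the only accepted 4-element pattern is <0, 0, 2, 2>
/// (modulo undefs), just as isMOVSHDUPMask above accepts <1, 1, 3, 3>.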
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}

/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies an identity operation on the LHS or RHS.
static bool isIdentityMask(SDNode *N, bool RHS = false) {
  unsigned NumElems = N->getNumOperands();
  for (unsigned i = 0; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
      return false;
  return true;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}

/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit, and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}

/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
bool X86::isSplatLoMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
    if (!isUndefOrEqual(N->getOperand(i), 0))
      return false;
  return true;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions. For example (illustrative), the 4-element mask <3, 1, 2, 0>
/// encodes as 0b00100111 (0x27): two bits per element, element 0 in the
/// lowest bits.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}

/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
/// values in their permute mask.
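/// For example (illustrative, added commentary): commuting
/// vector_shuffle V1, V2, <0, 1, 4, 5> yields
/// vector_shuffle V2, V1, <4, 5, 0, 1>; indices below NumElems have
/// NumElems added, the rest have it subtracted, and V1/V2 swap places.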
2527static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2528 SDOperand &V2, SDOperand &Mask, 2529 SelectionDAG &DAG) { 2530 MVT::ValueType VT = Op.getValueType(); 2531 MVT::ValueType MaskVT = Mask.getValueType(); 2532 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2533 unsigned NumElems = Mask.getNumOperands(); 2534 SmallVector<SDOperand, 8> MaskVec; 2535 2536 for (unsigned i = 0; i != NumElems; ++i) { 2537 SDOperand Arg = Mask.getOperand(i); 2538 if (Arg.getOpcode() == ISD::UNDEF) { 2539 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2540 continue; 2541 } 2542 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2543 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2544 if (Val < NumElems) 2545 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2546 else 2547 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2548 } 2549 2550 std::swap(V1, V2); 2551 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2552 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2553} 2554 2555/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 2556/// the two vector operands have swapped position. 2557static 2558SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { 2559 MVT::ValueType MaskVT = Mask.getValueType(); 2560 MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT); 2561 unsigned NumElems = Mask.getNumOperands(); 2562 SmallVector<SDOperand, 8> MaskVec; 2563 for (unsigned i = 0; i != NumElems; ++i) { 2564 SDOperand Arg = Mask.getOperand(i); 2565 if (Arg.getOpcode() == ISD::UNDEF) { 2566 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2567 continue; 2568 } 2569 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2570 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2571 if (Val < NumElems) 2572 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2573 else 2574 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2575 } 2576 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems); 2577} 2578 2579 2580/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2581/// match movhlps. The lower half elements should come from upper half of 2582/// V1 (and in order), and the upper half elements should come from the upper 2583/// half of V2 (and in order). 2584static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2585 unsigned NumElems = Mask->getNumOperands(); 2586 if (NumElems != 4) 2587 return false; 2588 for (unsigned i = 0, e = 2; i != e; ++i) 2589 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2590 return false; 2591 for (unsigned i = 2; i != 4; ++i) 2592 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2593 return false; 2594 return true; 2595} 2596 2597/// isScalarLoadToVector - Returns true if the node is a scalar load that 2598/// is promoted to a vector. 2599static inline bool isScalarLoadToVector(SDNode *N) { 2600 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2601 N = N->getOperand(0).Val; 2602 return ISD::isNON_EXTLoad(N); 2603 } 2604 return false; 2605} 2606 2607/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2608/// match movlp{s|d}. The lower half elements should come from lower half of 2609/// V1 (and in order), and the upper half elements should come from the upper 2610/// half of V2 (and in order). And since V1 will become the source of the 2611/// MOVLP, it must be either a vector load or a scalar load to vector. 
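/// For example (illustrative, added commentary): with 4 elements the
/// qualifying mask is <0, 1, 6, 7>, i.e. the low half of the result is the
/// low half of V1 and the high half is the high half of V2.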
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to fold
  // the load into a shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDOperand V1 = N->getOperand(0);
  SDOperand V2 = N->getOperand(1);
  SDOperand Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.Val->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.Val->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDOperand Vec;
  if (MVT::getSizeInBits(VT) == 64)  // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else                               // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}


/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  SmallVector<SDOperand, 8> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}

/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);

  SmallVector<SDOperand, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
  unsigned Half = NumElems/2;
  SmallVector<SDOperand, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  Mask = getUnpacklMask(NumElems, DAG);
  while (NumElems != 4) {
    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
    NumElems >>= 1;
  }
  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);

  Mask = getZeroVector(MVT::v4i32, DAG);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
  SmallVector<SDOperand, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
    else
      MaskVec.push_back(DAG.getConstant(i, EVT));
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                               &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
2853///
2854static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
2855                                       unsigned NumNonZero, unsigned NumZero,
2856                                       SelectionDAG &DAG, TargetLowering &TLI) {
2857  if (NumNonZero > 8)
2858    return SDOperand();
2859
2860  SDOperand V(0, 0);
2861  bool First = true;
2862  for (unsigned i = 0; i < 16; ++i) {
2863    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
2864    if (ThisIsNonZero && First) {
2865      if (NumZero)
2866        V = getZeroVector(MVT::v8i16, DAG);
2867      else
2868        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2869      First = false;
2870    }
2871
2872    if ((i & 1) != 0) {
2873      SDOperand ThisElt(0, 0), LastElt(0, 0);
2874      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
2875      if (LastIsNonZero) {
2876        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
2877      }
2878      if (ThisIsNonZero) {
2879        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
2880        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
2881                              ThisElt, DAG.getConstant(8, MVT::i8));
2882        if (LastIsNonZero)
2883          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
2884      } else
2885        ThisElt = LastElt;
2886
2887      if (ThisElt.Val)
2888        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
2889                        DAG.getIntPtrConstant(i/2));
2890    }
2891  }
2892
2893  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
2894}
2895
2896/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
2897///
2898static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
2899                                       unsigned NumNonZero, unsigned NumZero,
2900                                       SelectionDAG &DAG, TargetLowering &TLI) {
2901  if (NumNonZero > 4)
2902    return SDOperand();
2903
2904  SDOperand V(0, 0);
2905  bool First = true;
2906  for (unsigned i = 0; i < 8; ++i) {
2907    bool isNonZero = (NonZeros & (1 << i)) != 0;
2908    if (isNonZero) {
2909      if (First) {
2910        if (NumZero)
2911          V = getZeroVector(MVT::v8i16, DAG);
2912        else
2913          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
2914        First = false;
2915      }
2916      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
2917                      DAG.getIntPtrConstant(i));
2918    }
2919  }
2920
2921  return V;
2922}
2923
2924SDOperand
2925X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
2926  // All zeros are handled with pxor; all ones are handled with pcmpeqd.
2927  if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) {
2928    // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
2929    // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
2930    // eliminated on x86-32 hosts.
2931 if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32) 2932 return Op; 2933 2934 if (ISD::isBuildVectorAllOnes(Op.Val)) 2935 return getOnesVector(Op.getValueType(), DAG); 2936 return getZeroVector(Op.getValueType(), DAG); 2937 } 2938 2939 MVT::ValueType VT = Op.getValueType(); 2940 MVT::ValueType EVT = MVT::getVectorElementType(VT); 2941 unsigned EVTBits = MVT::getSizeInBits(EVT); 2942 2943 unsigned NumElems = Op.getNumOperands(); 2944 unsigned NumZero = 0; 2945 unsigned NumNonZero = 0; 2946 unsigned NonZeros = 0; 2947 bool HasNonImms = false; 2948 SmallSet<SDOperand, 8> Values; 2949 for (unsigned i = 0; i < NumElems; ++i) { 2950 SDOperand Elt = Op.getOperand(i); 2951 if (Elt.getOpcode() == ISD::UNDEF) 2952 continue; 2953 Values.insert(Elt); 2954 if (Elt.getOpcode() != ISD::Constant && 2955 Elt.getOpcode() != ISD::ConstantFP) 2956 HasNonImms = true; 2957 if (isZeroNode(Elt)) 2958 NumZero++; 2959 else { 2960 NonZeros |= (1 << i); 2961 NumNonZero++; 2962 } 2963 } 2964 2965 if (NumNonZero == 0) { 2966 // All undef vector. Return an UNDEF. All zero vectors were handled above. 2967 return DAG.getNode(ISD::UNDEF, VT); 2968 } 2969 2970 // Splat is obviously ok. Let legalizer expand it to a shuffle. 2971 if (Values.size() == 1) 2972 return SDOperand(); 2973 2974 // Special case for single non-zero element. 2975 if (NumNonZero == 1 && NumElems <= 4) { 2976 unsigned Idx = CountTrailingZeros_32(NonZeros); 2977 SDOperand Item = Op.getOperand(Idx); 2978 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2979 if (Idx == 0) 2980 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 2981 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 2982 NumZero > 0, DAG); 2983 else if (!HasNonImms) // Otherwise, it's better to do a constpool load. 2984 return SDOperand(); 2985 2986 if (EVTBits == 32) { 2987 // Turn it into a shuffle of zero and zero-extended scalar to vector. 2988 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 2989 DAG); 2990 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2991 MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT); 2992 SmallVector<SDOperand, 8> MaskVec; 2993 for (unsigned i = 0; i < NumElems; i++) 2994 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 2995 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2996 &MaskVec[0], MaskVec.size()); 2997 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 2998 DAG.getNode(ISD::UNDEF, VT), Mask); 2999 } 3000 } 3001 3002 // A vector full of immediates; various special cases are already 3003 // handled, so this is best done with a single constant-pool load. 3004 if (!HasNonImms) 3005 return SDOperand(); 3006 3007 // Let legalizer expand 2-wide build_vectors. 3008 if (EVTBits == 64) 3009 return SDOperand(); 3010 3011 // If element VT is < 32 bits, convert it to inserts into a zero vector. 3012 if (EVTBits == 8 && NumElems == 16) { 3013 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 3014 *this); 3015 if (V.Val) return V; 3016 } 3017 3018 if (EVTBits == 16 && NumElems == 8) { 3019 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 3020 *this); 3021 if (V.Val) return V; 3022 } 3023 3024 // If element VT is == 32 bits, turn it into a number of shuffles. 
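  // For example, <x, 0, y, 0> becomes scalar_to_vector nodes for x and y,
  // a MOVL-style merge of each with the zero vector, and one final shuffle
  // combining the two halves.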
3025  SmallVector<SDOperand, 8> V;
3026  V.resize(NumElems);
3027  if (NumElems == 4 && NumZero > 0) {
3028    for (unsigned i = 0; i < 4; ++i) {
3029      bool isZero = !(NonZeros & (1 << i));
3030      if (isZero)
3031        V[i] = getZeroVector(VT, DAG);
3032      else
3033        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3034    }
3035
3036    for (unsigned i = 0; i < 2; ++i) {
3037      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
3038        default: break;
3039        case 0:
3040          V[i] = V[i*2];  // Must be a zero vector.
3041          break;
3042        case 1:
3043          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
3044                             getMOVLMask(NumElems, DAG));
3045          break;
3046        case 2:
3047          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3048                             getMOVLMask(NumElems, DAG));
3049          break;
3050        case 3:
3051          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3052                             getUnpacklMask(NumElems, DAG));
3053          break;
3054      }
3055    }
3056
3057    // Take advantage of the fact that a GR32 to VR128 scalar_to_vector (i.e. movd)
3058    // clears the upper bits.
3059    // FIXME: we can do the same for the v4f32 case when we know both parts of
3060    // the lower half come from scalar_to_vector (loadf32). We should do
3061    // that in post legalizer dag combiner with target specific hooks.
3062    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
3063      return V[0];
3064    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3065    MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
3066    SmallVector<SDOperand, 8> MaskVec;
3067    bool Reverse = (NonZeros & 0x3) == 2;
3068    for (unsigned i = 0; i < 2; ++i)
3069      if (Reverse)
3070        MaskVec.push_back(DAG.getConstant(1-i, EVT));
3071      else
3072        MaskVec.push_back(DAG.getConstant(i, EVT));
3073    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
3074    for (unsigned i = 0; i < 2; ++i)
3075      if (Reverse)
3076        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
3077      else
3078        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
3079    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3080                                     &MaskVec[0], MaskVec.size());
3081    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
3082  }
3083
3084  if (Values.size() > 2) {
3085    // Expand into a number of unpckl*.
3086    // e.g. for v4f32
3087    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
3088    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
3089    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
3090    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
3091    for (unsigned i = 0; i < NumElems; ++i)
3092      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3093    NumElems >>= 1;
3094    while (NumElems != 0) {
3095      for (unsigned i = 0; i < NumElems; ++i)
3096        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
3097                           UnpckMask);
3098      NumElems >>= 1;
3099    }
3100    return V[0];
3101  }
3102
3103  return SDOperand();
3104}
3105
3106static
3107SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
3108                                   SDOperand PermMask, SelectionDAG &DAG,
3109                                   TargetLowering &TLI) {
3110  SDOperand NewV;
3111  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
3112  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3113  MVT::ValueType PtrVT = TLI.getPointerTy();
3114  SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
3115                                     PermMask.Val->op_end());
3116
3117  // First record which half of which vector the low elements come from.
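  // Shuffle indices 0-7 select from V1 and 8-15 from V2, so quads 0 and 1 are
  // V1's low and high halves and quads 2 and 3 are V2's low and high halves.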
3118 SmallVector<unsigned, 4> LowQuad(4); 3119 for (unsigned i = 0; i < 4; ++i) { 3120 SDOperand Elt = MaskElts[i]; 3121 if (Elt.getOpcode() == ISD::UNDEF) 3122 continue; 3123 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3124 int QuadIdx = EltIdx / 4; 3125 ++LowQuad[QuadIdx]; 3126 } 3127 int BestLowQuad = -1; 3128 unsigned MaxQuad = 1; 3129 for (unsigned i = 0; i < 4; ++i) { 3130 if (LowQuad[i] > MaxQuad) { 3131 BestLowQuad = i; 3132 MaxQuad = LowQuad[i]; 3133 } 3134 } 3135 3136 // Record which half of which vector the high elements come from. 3137 SmallVector<unsigned, 4> HighQuad(4); 3138 for (unsigned i = 4; i < 8; ++i) { 3139 SDOperand Elt = MaskElts[i]; 3140 if (Elt.getOpcode() == ISD::UNDEF) 3141 continue; 3142 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3143 int QuadIdx = EltIdx / 4; 3144 ++HighQuad[QuadIdx]; 3145 } 3146 int BestHighQuad = -1; 3147 MaxQuad = 1; 3148 for (unsigned i = 0; i < 4; ++i) { 3149 if (HighQuad[i] > MaxQuad) { 3150 BestHighQuad = i; 3151 MaxQuad = HighQuad[i]; 3152 } 3153 } 3154 3155 // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. 3156 if (BestLowQuad != -1 || BestHighQuad != -1) { 3157 // First sort the 4 chunks in order using shufpd. 3158 SmallVector<SDOperand, 8> MaskVec; 3159 if (BestLowQuad != -1) 3160 MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); 3161 else 3162 MaskVec.push_back(DAG.getConstant(0, MVT::i32)); 3163 if (BestHighQuad != -1) 3164 MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); 3165 else 3166 MaskVec.push_back(DAG.getConstant(1, MVT::i32)); 3167 SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); 3168 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, 3169 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), 3170 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); 3171 NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV); 3172 3173 // Now sort high and low parts separately. 3174 BitVector InOrder(8); 3175 if (BestLowQuad != -1) { 3176 // Sort lower half in order using PSHUFLW. 3177 MaskVec.clear(); 3178 bool AnyOutOrder = false; 3179 for (unsigned i = 0; i != 4; ++i) { 3180 SDOperand Elt = MaskElts[i]; 3181 if (Elt.getOpcode() == ISD::UNDEF) { 3182 MaskVec.push_back(Elt); 3183 InOrder.set(i); 3184 } else { 3185 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3186 if (EltIdx != i) 3187 AnyOutOrder = true; 3188 MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT)); 3189 // If this element is in the right place after this shuffle, then 3190 // remember it. 3191 if ((int)(EltIdx / 4) == BestLowQuad) 3192 InOrder.set(i); 3193 } 3194 } 3195 if (AnyOutOrder) { 3196 for (unsigned i = 4; i != 8; ++i) 3197 MaskVec.push_back(DAG.getConstant(i, MaskEVT)); 3198 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); 3199 NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); 3200 } 3201 } 3202 3203 if (BestHighQuad != -1) { 3204 // Sort high half in order using PSHUFHW if possible. 
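      // PSHUFHW permutes only elements 4-7; elements 0-3 keep the identity
      // indices pushed below.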
3205      MaskVec.clear();
3206      for (unsigned i = 0; i != 4; ++i)
3207        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
3208      bool AnyOutOrder = false;
3209      for (unsigned i = 4; i != 8; ++i) {
3210        SDOperand Elt = MaskElts[i];
3211        if (Elt.getOpcode() == ISD::UNDEF) {
3212          MaskVec.push_back(Elt);
3213          InOrder.set(i);
3214        } else {
3215          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3216          if (EltIdx != i)
3217            AnyOutOrder = true;
3218          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
3219          // If this element is in the right place after this shuffle, then
3220          // remember it.
3221          if ((int)(EltIdx / 4) == BestHighQuad)
3222            InOrder.set(i);
3223        }
3224      }
3225      if (AnyOutOrder) {
3226        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3227        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
3228      }
3229    }
3230
3231    // The other elements are put in the right place using pextrw and pinsrw.
3232    for (unsigned i = 0; i != 8; ++i) {
3233      if (InOrder[i])
3234        continue;
3235      SDOperand Elt = MaskElts[i];
3236      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3237      if (EltIdx == i)
3238        continue;
3239      SDOperand ExtOp = (EltIdx < 8)
3240        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3241                      DAG.getConstant(EltIdx, PtrVT))
3242        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3243                      DAG.getConstant(EltIdx - 8, PtrVT));
3244      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3245                         DAG.getConstant(i, PtrVT));
3246    }
3247    return NewV;
3248  }
3249
3250  // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use
3251  // as few as possible.
3252  // First, let's find out how many elements are already in the right order.
3253  unsigned V1InOrder = 0;
3254  unsigned V1FromV1 = 0;
3255  unsigned V2InOrder = 0;
3256  unsigned V2FromV2 = 0;
3257  SmallVector<SDOperand, 8> V1Elts;
3258  SmallVector<SDOperand, 8> V2Elts;
3259  for (unsigned i = 0; i < 8; ++i) {
3260    SDOperand Elt = MaskElts[i];
3261    if (Elt.getOpcode() == ISD::UNDEF) {
3262      V1Elts.push_back(Elt);
3263      V2Elts.push_back(Elt);
3264      ++V1InOrder;
3265      ++V2InOrder;
3266      continue;
3267    }
3268    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3269    if (EltIdx == i) {
3270      V1Elts.push_back(Elt);
3271      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
3272      ++V1InOrder;
3273    } else if (EltIdx == i+8) {
3274      V1Elts.push_back(Elt);
3275      V2Elts.push_back(DAG.getConstant(i, MaskEVT));
3276      ++V2InOrder;
3277    } else if (EltIdx < 8) {
3278      V1Elts.push_back(Elt);
3279      ++V1FromV1;
3280    } else {
3281      V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
3282      ++V2FromV2;
3283    }
3284  }
3285
3286  if (V2InOrder > V1InOrder) {
3287    PermMask = CommuteVectorShuffleMask(PermMask, DAG);
3288    std::swap(V1, V2);
3289    std::swap(V1Elts, V2Elts);
3290    std::swap(V1FromV1, V2FromV2);
3291  }
3292
3293  if ((V1FromV1 + V1InOrder) != 8) {
3294    // Some elements are from V2.
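    // Plan: first shuffle V1's own contributions into place, then pextrw the
    // needed V2 elements and pinsrw them into the result one at a time.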
3295    if (V1FromV1) {
3296      // If there are elements that are from V1 but out of place,
3297      // then first sort them in place
3298      SmallVector<SDOperand, 8> MaskVec;
3299      for (unsigned i = 0; i < 8; ++i) {
3300        SDOperand Elt = V1Elts[i];
3301        if (Elt.getOpcode() == ISD::UNDEF) {
3302          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3303          continue;
3304        }
3305        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3306        if (EltIdx >= 8)
3307          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3308        else
3309          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
3310      }
3311      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3312      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
3313    }
3314
3315    NewV = V1;
3316    for (unsigned i = 0; i < 8; ++i) {
3317      SDOperand Elt = V1Elts[i];
3318      if (Elt.getOpcode() == ISD::UNDEF)
3319        continue;
3320      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3321      if (EltIdx < 8)
3322        continue;
3323      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3324                                    DAG.getConstant(EltIdx - 8, PtrVT));
3325      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3326                         DAG.getConstant(i, PtrVT));
3327    }
3328    return NewV;
3329  } else {
3330    // All elements are from V1.
3331    NewV = V1;
3332    for (unsigned i = 0; i < 8; ++i) {
3333      SDOperand Elt = V1Elts[i];
3334      if (Elt.getOpcode() == ISD::UNDEF)
3335        continue;
3336      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3337      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3338                                    DAG.getConstant(EltIdx, PtrVT));
3339      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3340                         DAG.getConstant(i, PtrVT));
3341    }
3342    return NewV;
3343  }
3344}
3345
3346/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
3347/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
3348/// done when every pair / quad of shuffle mask elements points to elements in
3349/// the right sequence. e.g.
3350/// vector_shuffle <>, <>, < 2, 3, | 10, 11, | 0, 1, | 14, 15>
3351static
3352SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
3353                                   MVT::ValueType VT,
3354                                   SDOperand PermMask, SelectionDAG &DAG,
3355                                   TargetLowering &TLI) {
3356  unsigned NumElems = PermMask.getNumOperands();
3357  unsigned NewWidth = (NumElems == 4) ?
2 : 4; 3358 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth); 3359 MVT::ValueType NewVT = MaskVT; 3360 switch (VT) { 3361 case MVT::v4f32: NewVT = MVT::v2f64; break; 3362 case MVT::v4i32: NewVT = MVT::v2i64; break; 3363 case MVT::v8i16: NewVT = MVT::v4i32; break; 3364 case MVT::v16i8: NewVT = MVT::v4i32; break; 3365 default: assert(false && "Unexpected!"); 3366 } 3367 3368 if (NewWidth == 2) 3369 if (MVT::isInteger(VT)) 3370 NewVT = MVT::v2i64; 3371 else 3372 NewVT = MVT::v2f64; 3373 unsigned Scale = NumElems / NewWidth; 3374 SmallVector<SDOperand, 8> MaskVec; 3375 for (unsigned i = 0; i < NumElems; i += Scale) { 3376 unsigned StartIdx = ~0U; 3377 for (unsigned j = 0; j < Scale; ++j) { 3378 SDOperand Elt = PermMask.getOperand(i+j); 3379 if (Elt.getOpcode() == ISD::UNDEF) 3380 continue; 3381 unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); 3382 if (StartIdx == ~0U) 3383 StartIdx = EltIdx - (EltIdx % Scale); 3384 if (EltIdx != StartIdx + j) 3385 return SDOperand(); 3386 } 3387 if (StartIdx == ~0U) 3388 MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32)); 3389 else 3390 MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32)); 3391 } 3392 3393 V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1); 3394 V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2); 3395 return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2, 3396 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 3397 &MaskVec[0], MaskVec.size())); 3398} 3399 3400SDOperand 3401X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 3402 SDOperand V1 = Op.getOperand(0); 3403 SDOperand V2 = Op.getOperand(1); 3404 SDOperand PermMask = Op.getOperand(2); 3405 MVT::ValueType VT = Op.getValueType(); 3406 unsigned NumElems = PermMask.getNumOperands(); 3407 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 3408 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 3409 bool V1IsSplat = false; 3410 bool V2IsSplat = false; 3411 3412 if (isUndefShuffle(Op.Val)) 3413 return DAG.getNode(ISD::UNDEF, VT); 3414 3415 if (isZeroShuffle(Op.Val)) 3416 return getZeroVector(VT, DAG); 3417 3418 if (isIdentityMask(PermMask.Val)) 3419 return V1; 3420 else if (isIdentityMask(PermMask.Val, true)) 3421 return V2; 3422 3423 if (isSplatMask(PermMask.Val)) { 3424 if (NumElems <= 4) return Op; 3425 // Promote it to a v4i32 splat. 3426 return PromoteSplat(Op, DAG); 3427 } 3428 3429 // If the shuffle can be profitably rewritten as a narrower shuffle, then 3430 // do it! 3431 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 3432 SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); 3433 if (NewOp.Val) 3434 return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); 3435 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 3436 // FIXME: Figure out a cleaner way to do this. 3437 // Try to make use of movq to zero out the top part. 
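    // For example, the v4i32 mask <0, 1, 4, 5> with V2 all zeros narrows to
    // the v2i64 mask <0, 2>, i.e. a movq that clears the upper 64 bits.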
3438    if (ISD::isBuildVectorAllZeros(V2.Val)) {
3439      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3440      if (NewOp.Val) {
3441        SDOperand NewV1 = NewOp.getOperand(0);
3442        SDOperand NewV2 = NewOp.getOperand(1);
3443        SDOperand NewMask = NewOp.getOperand(2);
3444        if (isCommutedMOVL(NewMask.Val, true, false)) {
3445          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
3446          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
3447                              NewV1, NewV2, getMOVLMask(2, DAG));
3448          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3449        }
3450      }
3451    } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
3452      SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3453      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
3454        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3455    }
3456  }
3457
3458  if (X86::isMOVLMask(PermMask.Val))
3459    return (V1IsUndef) ? V2 : Op;
3460
3461  if (X86::isMOVSHDUPMask(PermMask.Val) ||
3462      X86::isMOVSLDUPMask(PermMask.Val) ||
3463      X86::isMOVHLPSMask(PermMask.Val) ||
3464      X86::isMOVHPMask(PermMask.Val) ||
3465      X86::isMOVLPMask(PermMask.Val))
3466    return Op;
3467
3468  if (ShouldXformToMOVHLPS(PermMask.Val) ||
3469      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
3470    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3471
3472  bool Commuted = false;
3473  // FIXME: This should also accept a bitcast of a splat?  Be careful, not
3474  // 1,1,1,1 -> v8i16 though.
3475  V1IsSplat = isSplatVector(V1.Val);
3476  V2IsSplat = isSplatVector(V2.Val);
3477
3478  // Canonicalize the splat or undef, if present, to be on the RHS.
3479  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
3480    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3481    std::swap(V1IsSplat, V2IsSplat);
3482    std::swap(V1IsUndef, V2IsUndef);
3483    Commuted = true;
3484  }
3485
3486  // FIXME: Figure out a cleaner way to do this.
3487  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
3488    if (V2IsUndef) return V1;
3489    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3490    if (V2IsSplat) {
3491      // V2 is a splat, so the mask may be malformed. That is, it may point
3492      // to any V2 element. The instruction selector won't like this. Get
3493      // a corrected mask and commute to form a proper MOVS{S|D}.
3494      SDOperand NewMask = getMOVLMask(NumElems, DAG);
3495      if (NewMask.Val != PermMask.Val)
3496        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3497    }
3498    return Op;
3499  }
3500
3501  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3502      X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3503      X86::isUNPCKLMask(PermMask.Val) ||
3504      X86::isUNPCKHMask(PermMask.Val))
3505    return Op;
3506
3507  if (V2IsSplat) {
3508    // Normalize the mask so all entries that point to V2 point to its first
3509    // element, then try to match unpck{h|l} again. If it matches, return a
3510    // new vector_shuffle with the corrected mask.
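    // For example, for v4i32 the mask <0, 5, 1, 6> normalizes to <0, 4, 1, 4>,
    // which is acceptable as unpckl here because every element of the
    // splatted V2 is identical.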
3511    SDOperand NewMask = NormalizeMask(PermMask, DAG);
3512    if (NewMask.Val != PermMask.Val) {
3513      if (X86::isUNPCKLMask(PermMask.Val, true)) {
3514        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3515        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3516      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3517        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3518        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3519      }
3520    }
3521  }
3522
3523  // Normalize the node to match x86 shuffle ops if needed
3524  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
3525    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3526
3527  if (Commuted) {
3528    // Commute it back and try unpck* again.
3529    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3530    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3531        X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3532        X86::isUNPCKLMask(PermMask.Val) ||
3533        X86::isUNPCKHMask(PermMask.Val))
3534      return Op;
3535  }
3536
3537  // If VT is integer, try PSHUF* first, then SHUFP*.
3538  if (MVT::isInteger(VT)) {
3539    // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
3540    // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
3541    if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
3542         X86::isPSHUFDMask(PermMask.Val)) ||
3543        X86::isPSHUFHWMask(PermMask.Val) ||
3544        X86::isPSHUFLWMask(PermMask.Val)) {
3545      if (V2.getOpcode() != ISD::UNDEF)
3546        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3547                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3548      return Op;
3549    }
3550
3551    if (X86::isSHUFPMask(PermMask.Val) &&
3552        MVT::getSizeInBits(VT) != 64)    // Don't do this for MMX.
3553      return Op;
3554  } else {
3555    // Floating point cases in the other order.
3556    if (X86::isSHUFPMask(PermMask.Val))
3557      return Op;
3558    if (X86::isPSHUFDMask(PermMask.Val) ||
3559        X86::isPSHUFHWMask(PermMask.Val) ||
3560        X86::isPSHUFLWMask(PermMask.Val)) {
3561      if (V2.getOpcode() != ISD::UNDEF)
3562        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3563                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3564      return Op;
3565    }
3566  }
3567
3568  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
3569  if (VT == MVT::v8i16) {
3570    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
3571    if (NewOp.Val)
3572      return NewOp;
3573  }
3574
3575  // Handle all 4 wide cases with a number of shuffles.
3576  if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) {
3577    // Don't do this for MMX.
3578    MVT::ValueType MaskVT = PermMask.getValueType();
3579    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3580    SmallVector<std::pair<int, int>, 8> Locs;
3581    Locs.resize(NumElems);  // resized (not just reserved): filled in by index below
3582    SmallVector<SDOperand, 8> Mask1(NumElems,
3583                                    DAG.getNode(ISD::UNDEF, MaskEVT));
3584    SmallVector<SDOperand, 8> Mask2(NumElems,
3585                                    DAG.getNode(ISD::UNDEF, MaskEVT));
3586    unsigned NumHi = 0;
3587    unsigned NumLo = 0;
3588    // If no more than two elements come from either vector, this can be
3589    // implemented with two shuffles. The first shuffle gathers the elements;
3590    // the second shuffle, which takes the first shuffle as both of its
3591    // vector operands, puts the elements into the right order.
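    // e.g. for the v4f32 mask <4, 1, 6, 3>:
    //   Step 1: gather with mask <1, 3, 4, 6>     ==> T: <V1[1], V1[3], V2[0], V2[2]>
    //   Step 2: shuffle T, T with mask <2, 0, 7, 5> ==> <V2[0], V1[1], V2[2], V1[3]>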
3592    for (unsigned i = 0; i != NumElems; ++i) {
3593      SDOperand Elt = PermMask.getOperand(i);
3594      if (Elt.getOpcode() == ISD::UNDEF) {
3595        Locs[i] = std::make_pair(-1, -1);
3596      } else {
3597        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3598        if (Val < NumElems) {
3599          Locs[i] = std::make_pair(0, NumLo);
3600          Mask1[NumLo] = Elt;
3601          NumLo++;
3602        } else {
3603          Locs[i] = std::make_pair(1, NumHi);
3604          if (2+NumHi < NumElems)
3605            Mask1[2+NumHi] = Elt;
3606          NumHi++;
3607        }
3608      }
3609    }
3610    if (NumLo <= 2 && NumHi <= 2) {
3611      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3612                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3613                                   &Mask1[0], Mask1.size()));
3614      for (unsigned i = 0; i != NumElems; ++i) {
3615        if (Locs[i].first == -1)
3616          continue;
3617        else {
3618          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3619          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3620          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3621        }
3622      }
3623
3624      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
3625                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3626                                     &Mask2[0], Mask2.size()));
3627    }
3628
3629    // Break it into (shuffle shuffle_hi, shuffle_lo).
3630    Locs.clear(); Locs.resize(NumElems);  // re-sized again for the indexed refill below
3631    SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3632    SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3633    SmallVector<SDOperand,8> *MaskPtr = &LoMask;
3634    unsigned MaskIdx = 0;
3635    unsigned LoIdx = 0;
3636    unsigned HiIdx = NumElems/2;
3637    for (unsigned i = 0; i != NumElems; ++i) {
3638      if (i == NumElems/2) {
3639        MaskPtr = &HiMask;
3640        MaskIdx = 1;
3641        LoIdx = 0;
3642        HiIdx = NumElems/2;
3643      }
3644      SDOperand Elt = PermMask.getOperand(i);
3645      if (Elt.getOpcode() == ISD::UNDEF) {
3646        Locs[i] = std::make_pair(-1, -1);
3647      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
3648        Locs[i] = std::make_pair(MaskIdx, LoIdx);
3649        (*MaskPtr)[LoIdx] = Elt;
3650        LoIdx++;
3651      } else {
3652        Locs[i] = std::make_pair(MaskIdx, HiIdx);
3653        (*MaskPtr)[HiIdx] = Elt;
3654        HiIdx++;
3655      }
3656    }
3657
3658    SDOperand LoShuffle =
3659      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3660                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3661                              &LoMask[0], LoMask.size()));
3662    SDOperand HiShuffle =
3663      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3664                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3665                              &HiMask[0], HiMask.size()));
3666    SmallVector<SDOperand, 8> MaskOps;
3667    for (unsigned i = 0; i != NumElems; ++i) {
3668      if (Locs[i].first == -1) {
3669        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3670      } else {
3671        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
3672        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
3673      }
3674    }
3675    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
3676                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3677                                   &MaskOps[0], MaskOps.size()));
3678  }
3679
3680  return SDOperand();
3681}
3682
3683SDOperand
3684X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op,
3685                                                SelectionDAG &DAG) {
3686  MVT::ValueType VT = Op.getValueType();
3687  if (MVT::getSizeInBits(VT) == 8) {
3688    SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
3689                                    Op.getOperand(0), Op.getOperand(1));
3690    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
3691                                   DAG.getValueType(VT));
3692    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3693  } else if (MVT::getSizeInBits(VT) == 16) {
3694    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
3695                                    Op.getOperand(0), Op.getOperand(1));
3696    SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
3697                                   DAG.getValueType(VT));
3698    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3699  }
3700  return SDOperand();
3701}
3702
3703
3704SDOperand
3705X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3706  if (!isa<ConstantSDNode>(Op.getOperand(1)))
3707    return SDOperand();
3708
3709  if (Subtarget->hasSSE41())
3710    return LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
3711
3712  MVT::ValueType VT = Op.getValueType();
3713  // TODO: handle v16i8.
3714  if (MVT::getSizeInBits(VT) == 16) {
3715    SDOperand Vec = Op.getOperand(0);
3716    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3717    if (Idx == 0)
3718      return DAG.getNode(ISD::TRUNCATE, MVT::i16,
3719                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
3720                                     DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
3721                                     Op.getOperand(1)));
3722    // Transform it so it matches pextrw, which produces a 32-bit result.
3723    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
3724    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
3725                                    Op.getOperand(0), Op.getOperand(1));
3726    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
3727                                   DAG.getValueType(VT));
3728    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3729  } else if (MVT::getSizeInBits(VT) == 32) {
3730    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3731    if (Idx == 0)
3732      return Op;
3733    // SHUFPS the element to the lowest double word, then movss.
3734    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3735    SmallVector<SDOperand, 8> IdxVec;
3736    IdxVec.
3737      push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
3738    IdxVec.
3739      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3740    IdxVec.
3741      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3742    IdxVec.
3743      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3744    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3745                                 &IdxVec[0], IdxVec.size());
3746    SDOperand Vec = Op.getOperand(0);
3747    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3748                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3749    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3750                       DAG.getIntPtrConstant(0));
3751  } else if (MVT::getSizeInBits(VT) == 64) {
3752    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
3753    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
3754    // to match extract_elt for f64.
3755    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3756    if (Idx == 0)
3757      return Op;
3758
3759    // UNPCKHPD the element to the lowest double word, then movsd.
3760    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
3761    // to an f64mem, the whole operation is folded into a single MOVHPDmr.
3762    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3763    SmallVector<SDOperand, 8> IdxVec;
3764    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
3765    IdxVec.
3766      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3767    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3768                                 &IdxVec[0], IdxVec.size());
3769    SDOperand Vec = Op.getOperand(0);
3770    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3771                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3772    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3773                       DAG.getIntPtrConstant(0));
3774  }
3775
3776  return SDOperand();
3777}
3778
3779SDOperand
3780X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
3781  MVT::ValueType VT = Op.getValueType();
3782  MVT::ValueType EVT = MVT::getVectorElementType(VT);
3783
3784  SDOperand N0 = Op.getOperand(0);
3785  SDOperand N1 = Op.getOperand(1);
3786  SDOperand N2 = Op.getOperand(2);
3787
3788  if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
3789    unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
3790                                                  : X86ISD::PINSRW;
3791    // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
3792    // argument.
3793    if (N1.getValueType() != MVT::i32)
3794      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
3795    if (N2.getValueType() != MVT::i32)
3796      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
3797    return DAG.getNode(Opc, VT, N0, N1, N2);
3798  } else if (EVT == MVT::f32) {
3799    // Bits [7:6] of the constant are the source select. This will always be
3800    // zero here. The DAG Combiner may combine an extract_elt index into these
3801    // bits. For example (insert (extract, 3), 2) could be matched by putting
3802    // the '3' into bits [7:6] of X86ISD::INSERTPS.
3803    // Bits [5:4] of the constant are the destination select. This is the
3804    // value of the incoming immediate.
3805    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
3806    // combine either bitwise AND or insert of float 0.0 to set these bits.
3807    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4);
3808    return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
3809  }
3810  return SDOperand();
3811}
3812
3813SDOperand
3814X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3815  MVT::ValueType VT = Op.getValueType();
3816  MVT::ValueType EVT = MVT::getVectorElementType(VT);
3817
3818  if (Subtarget->hasSSE41())
3819    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
3820
3821  if (EVT == MVT::i8)
3822    return SDOperand();
3823
3824  SDOperand N0 = Op.getOperand(0);
3825  SDOperand N1 = Op.getOperand(1);
3826  SDOperand N2 = Op.getOperand(2);
3827
3828  if (MVT::getSizeInBits(EVT) == 16) {
3829    // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
3830    // as its second argument.
3831    if (N1.getValueType() != MVT::i32)
3832      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
3833    if (N2.getValueType() != MVT::i32)
3834      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
3835    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
3836  }
3837  return SDOperand();
3838}
3839
3840SDOperand
3841X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
3842  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
3843  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
3844}
3845
3846// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3847// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
3848// one of the above-mentioned nodes. It has to be wrapped because otherwise
3849// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
3850// be used to form an addressing mode. These wrapped nodes will be selected
3851// into MOV32ri.
3852SDOperand
3853X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
3854  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3855  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
3856                                               getPointerTy(),
3857                                               CP->getAlignment());
3858  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
3859  // With PIC, the address is actually $g + Offset.
3860  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
3861      !Subtarget->isPICStyleRIPRel()) {
3862    Result = DAG.getNode(ISD::ADD, getPointerTy(),
3863                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3864                         Result);
3865  }
3866
3867  return Result;
3868}
3869
3870SDOperand
3871X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
3872  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3873  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
3874  // If it's a debug information descriptor, don't mess with it.
3875  if (DAG.isVerifiedDebugInfoDesc(Op))
3876    return Result;
3877  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
3878  // With PIC, the address is actually $g + Offset.
3879  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
3880      !Subtarget->isPICStyleRIPRel()) {
3881    Result = DAG.getNode(ISD::ADD, getPointerTy(),
3882                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3883                         Result);
3884  }
3885
3886  // For Darwin & Mingw32, external and weak symbols are indirect, so we want to
3887  // load the value at address GV, not the value of GV itself. This means that
3888  // the GlobalAddress must be in the base or index register of the address, not
3889  // the GV offset field. The platform check is inside the GVRequiresExtraLoad()
3890  // call. The same applies to external symbols during PIC codegen.
3891  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
3892    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
3893                         PseudoSourceValue::getGOT(), 0);
3894
3895  return Result;
3896}
3897
3898// Lower ISD::GlobalTLSAddress using the "general dynamic" model
3899static SDOperand
3900LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
3901                              const MVT::ValueType PtrVT) {
3902  SDOperand InFlag;
3903  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
3904                                     DAG.getNode(X86ISD::GlobalBaseReg,
3905                                                 PtrVT), InFlag);
3906  InFlag = Chain.getValue(1);
3907
3908  // emit leal symbol@TLSGD(,%ebx,1), %eax
3909  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
3910  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
3911                                             GA->getValueType(0),
3912                                             GA->getOffset());
3913  SDOperand Ops[] = { Chain, TGA, InFlag };
3914  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
3915  InFlag = Result.getValue(2);
3916  Chain = Result.getValue(1);
3917
3918  // call ___tls_get_addr. This function receives its argument in
3919  // the register EAX.
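  // Together with the leal above, this materializes the standard IA-32
  // general-dynamic sequence: leal x@TLSGD(,%ebx,1),%eax; call ___tls_get_addr.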
3920  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
3921  InFlag = Chain.getValue(1);
3922
3923  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
3924  SDOperand Ops1[] = { Chain,
3925                       DAG.getTargetExternalSymbol("___tls_get_addr",
3926                                                   PtrVT),
3927                       DAG.getRegister(X86::EAX, PtrVT),
3928                       DAG.getRegister(X86::EBX, PtrVT),
3929                       InFlag };
3930  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
3931  InFlag = Chain.getValue(1);
3932
3933  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
3934}
3935
3936// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
3937// "local exec" model.
3938static SDOperand
3939LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
3940                    const MVT::ValueType PtrVT) {
3941  // Get the Thread Pointer
3942  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
3943  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
3944  // exec)
3945  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
3946                                             GA->getValueType(0),
3947                                             GA->getOffset());
3948  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);
3949
3950  if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
3951    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset,
3952                         PseudoSourceValue::getGOT(), 0);
3953
3954  // The address of the thread-local variable is the thread pointer plus the
3955  // offset of the variable.
3956  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
3957}
3958
3959SDOperand
3960X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
3961  // TODO: implement the "local dynamic" model
3962  // TODO: implement the "initial exec" model for PIC executables
3963  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
3964         "TLS not implemented for non-ELF and 64-bit targets");
3965  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3966  // If the relocation model is PIC, use the "General Dynamic" TLS model;
3967  // otherwise use the "Local Exec" TLS model.
3968  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
3969    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
3970  else
3971    return LowerToTLSExecModel(GA, DAG, getPointerTy());
3972}
3973
3974SDOperand
3975X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
3976  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
3977  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
3978  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
3979  // With PIC, the address is actually $g + Offset.
3980  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
3981      !Subtarget->isPICStyleRIPRel()) {
3982    Result = DAG.getNode(ISD::ADD, getPointerTy(),
3983                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3984                         Result);
3985  }
3986
3987  return Result;
3988}
3989
3990SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
3991  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3992  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
3993  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
3994  // With PIC, the address is actually $g + Offset.
3995 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3996 !Subtarget->isPICStyleRIPRel()) { 3997 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3998 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3999 Result); 4000 } 4001 4002 return Result; 4003} 4004 4005/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and 4006/// take a 2 x i32 value to shift plus a shift amount. 4007SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 4008 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 4009 "Not an i64 shift!"); 4010 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 4011 SDOperand ShOpLo = Op.getOperand(0); 4012 SDOperand ShOpHi = Op.getOperand(1); 4013 SDOperand ShAmt = Op.getOperand(2); 4014 SDOperand Tmp1 = isSRA ? 4015 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 4016 DAG.getConstant(0, MVT::i32); 4017 4018 SDOperand Tmp2, Tmp3; 4019 if (Op.getOpcode() == ISD::SHL_PARTS) { 4020 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 4021 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 4022 } else { 4023 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 4024 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 4025 } 4026 4027 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4028 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 4029 DAG.getConstant(32, MVT::i8)); 4030 SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, 4031 AndNode, DAG.getConstant(0, MVT::i8)); 4032 4033 SDOperand Hi, Lo; 4034 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4035 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 4036 SmallVector<SDOperand, 4> Ops; 4037 if (Op.getOpcode() == ISD::SHL_PARTS) { 4038 Ops.push_back(Tmp2); 4039 Ops.push_back(Tmp3); 4040 Ops.push_back(CC); 4041 Ops.push_back(Cond); 4042 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4043 4044 Ops.clear(); 4045 Ops.push_back(Tmp3); 4046 Ops.push_back(Tmp1); 4047 Ops.push_back(CC); 4048 Ops.push_back(Cond); 4049 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4050 } else { 4051 Ops.push_back(Tmp2); 4052 Ops.push_back(Tmp3); 4053 Ops.push_back(CC); 4054 Ops.push_back(Cond); 4055 Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4056 4057 Ops.clear(); 4058 Ops.push_back(Tmp3); 4059 Ops.push_back(Tmp1); 4060 Ops.push_back(CC); 4061 Ops.push_back(Cond); 4062 Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size()); 4063 } 4064 4065 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 4066 Ops.clear(); 4067 Ops.push_back(Lo); 4068 Ops.push_back(Hi); 4069 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 4070} 4071 4072SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 4073 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 4074 Op.getOperand(0).getValueType() >= MVT::i16 && 4075 "Unknown SINT_TO_FP to lower!"); 4076 4077 SDOperand Result; 4078 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 4079 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 4080 MachineFunction &MF = DAG.getMachineFunction(); 4081 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 4082 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4083 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 4084 StackSlot, 4085 PseudoSourceValue::getFixedStack(), 4086 SSFI); 4087 4088 // These are really Legal; caller falls through into that case. 
4089 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 4090 return Result; 4091 if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 4092 Subtarget->is64Bit()) 4093 return Result; 4094 4095 // Build the FILD 4096 SDVTList Tys; 4097 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 4098 if (useSSE) 4099 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 4100 else 4101 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 4102 SmallVector<SDOperand, 8> Ops; 4103 Ops.push_back(Chain); 4104 Ops.push_back(StackSlot); 4105 Ops.push_back(DAG.getValueType(SrcVT)); 4106 Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 4107 Tys, &Ops[0], Ops.size()); 4108 4109 if (useSSE) { 4110 Chain = Result.getValue(1); 4111 SDOperand InFlag = Result.getValue(2); 4112 4113 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 4114 // shouldn't be necessary except that RFP cannot be live across 4115 // multiple blocks. When stackifier is fixed, they can be uncoupled. 4116 MachineFunction &MF = DAG.getMachineFunction(); 4117 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 4118 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4119 Tys = DAG.getVTList(MVT::Other); 4120 SmallVector<SDOperand, 8> Ops; 4121 Ops.push_back(Chain); 4122 Ops.push_back(Result); 4123 Ops.push_back(StackSlot); 4124 Ops.push_back(DAG.getValueType(Op.getValueType())); 4125 Ops.push_back(InFlag); 4126 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 4127 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, 4128 PseudoSourceValue::getFixedStack(), SSFI); 4129 } 4130 4131 return Result; 4132} 4133 4134std::pair<SDOperand,SDOperand> X86TargetLowering:: 4135FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { 4136 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 4137 "Unknown FP_TO_SINT to lower!"); 4138 4139 // These are really Legal. 4140 if (Op.getValueType() == MVT::i32 && 4141 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 4142 return std::make_pair(SDOperand(), SDOperand()); 4143 if (Subtarget->is64Bit() && 4144 Op.getValueType() == MVT::i64 && 4145 Op.getOperand(0).getValueType() != MVT::f80) 4146 return std::make_pair(SDOperand(), SDOperand()); 4147 4148 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 4149 // stack slot. 
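  // (If the source value lives in an SSE register, it is first spilled and
  // reloaded with FLD below, since the FIST family operates on the x87 stack.)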
4150 MachineFunction &MF = DAG.getMachineFunction(); 4151 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 4152 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4153 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4154 unsigned Opc; 4155 switch (Op.getValueType()) { 4156 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 4157 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 4158 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 4159 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 4160 } 4161 4162 SDOperand Chain = DAG.getEntryNode(); 4163 SDOperand Value = Op.getOperand(0); 4164 if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { 4165 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 4166 Chain = DAG.getStore(Chain, Value, StackSlot, 4167 PseudoSourceValue::getFixedStack(), SSFI); 4168 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 4169 SDOperand Ops[] = { 4170 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 4171 }; 4172 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 4173 Chain = Value.getValue(1); 4174 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 4175 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 4176 } 4177 4178 // Build the FP_TO_INT*_IN_MEM 4179 SDOperand Ops[] = { Chain, Value, StackSlot }; 4180 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 4181 4182 return std::make_pair(FIST, StackSlot); 4183} 4184 4185SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 4186 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); 4187 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4188 if (FIST.Val == 0) return SDOperand(); 4189 4190 // Load the result. 4191 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 4192} 4193 4194SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { 4195 std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); 4196 SDOperand FIST = Vals.first, StackSlot = Vals.second; 4197 if (FIST.Val == 0) return 0; 4198 4199 // Return an i64 load from the stack slot. 4200 SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0); 4201 4202 // Use a MERGE_VALUES node to drop the chain result value. 
4203 return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val; 4204} 4205 4206SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 4207 MVT::ValueType VT = Op.getValueType(); 4208 MVT::ValueType EltVT = VT; 4209 if (MVT::isVector(VT)) 4210 EltVT = MVT::getVectorElementType(VT); 4211 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4212 std::vector<Constant*> CV; 4213 if (EltVT == MVT::f64) { 4214 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63)))); 4215 CV.push_back(C); 4216 CV.push_back(C); 4217 } else { 4218 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31)))); 4219 CV.push_back(C); 4220 CV.push_back(C); 4221 CV.push_back(C); 4222 CV.push_back(C); 4223 } 4224 Constant *C = ConstantVector::get(CV); 4225 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4226 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4227 PseudoSourceValue::getConstantPool(), 0, 4228 false, 16); 4229 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 4230} 4231 4232SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 4233 MVT::ValueType VT = Op.getValueType(); 4234 MVT::ValueType EltVT = VT; 4235 unsigned EltNum = 1; 4236 if (MVT::isVector(VT)) { 4237 EltVT = MVT::getVectorElementType(VT); 4238 EltNum = MVT::getVectorNumElements(VT); 4239 } 4240 const Type *OpNTy = MVT::getTypeForValueType(EltVT); 4241 std::vector<Constant*> CV; 4242 if (EltVT == MVT::f64) { 4243 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63))); 4244 CV.push_back(C); 4245 CV.push_back(C); 4246 } else { 4247 Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31))); 4248 CV.push_back(C); 4249 CV.push_back(C); 4250 CV.push_back(C); 4251 CV.push_back(C); 4252 } 4253 Constant *C = ConstantVector::get(CV); 4254 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4255 SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4256 PseudoSourceValue::getConstantPool(), 0, 4257 false, 16); 4258 if (MVT::isVector(VT)) { 4259 return DAG.getNode(ISD::BIT_CONVERT, VT, 4260 DAG.getNode(ISD::XOR, MVT::v2i64, 4261 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)), 4262 DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask))); 4263 } else { 4264 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 4265 } 4266} 4267 4268SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 4269 SDOperand Op0 = Op.getOperand(0); 4270 SDOperand Op1 = Op.getOperand(1); 4271 MVT::ValueType VT = Op.getValueType(); 4272 MVT::ValueType SrcVT = Op1.getValueType(); 4273 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 4274 4275 // If second operand is smaller, extend it first. 4276 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 4277 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 4278 SrcVT = VT; 4279 SrcTy = MVT::getTypeForValueType(SrcVT); 4280 } 4281 // And if it is bigger, shrink it first. 4282 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4283 Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1)); 4284 SrcVT = VT; 4285 SrcTy = MVT::getTypeForValueType(SrcVT); 4286 } 4287 4288 // At this point the operands and the result should have the same 4289 // type, and that won't be f80 since that is not custom lowered. 4290 4291 // First get the sign bit of second operand. 
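  // That is, AND Op1 with the constant <2^63, 0> (f64) or <2^31, 0, 0, 0>
  // (f32), leaving just the sign bit in the low element.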
4292 std::vector<Constant*> CV; 4293 if (SrcVT == MVT::f64) { 4294 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63)))); 4295 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4296 } else { 4297 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31)))); 4298 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4299 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4300 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4301 } 4302 Constant *C = ConstantVector::get(CV); 4303 SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4304 SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, 4305 PseudoSourceValue::getConstantPool(), 0, 4306 false, 16); 4307 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 4308 4309 // Shift sign bit right or left if the two operands have different types. 4310 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 4311 // Op0 is MVT::f32, Op1 is MVT::f64. 4312 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 4313 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 4314 DAG.getConstant(32, MVT::i32)); 4315 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 4316 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 4317 DAG.getIntPtrConstant(0)); 4318 } 4319 4320 // Clear first operand sign bit. 4321 CV.clear(); 4322 if (VT == MVT::f64) { 4323 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63))))); 4324 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0)))); 4325 } else { 4326 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31))))); 4327 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4328 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4329 CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0)))); 4330 } 4331 C = ConstantVector::get(CV); 4332 CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); 4333 SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, 4334 PseudoSourceValue::getConstantPool(), 0, 4335 false, 16); 4336 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 4337 4338 // Or the value with the sign bit. 
4339 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 4340} 4341 4342SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 4343 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 4344 SDOperand Cond; 4345 SDOperand Op0 = Op.getOperand(0); 4346 SDOperand Op1 = Op.getOperand(1); 4347 SDOperand CC = Op.getOperand(2); 4348 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4349 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 4350 unsigned X86CC; 4351 4352 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 4353 Op0, Op1, DAG)) { 4354 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4355 return DAG.getNode(X86ISD::SETCC, MVT::i8, 4356 DAG.getConstant(X86CC, MVT::i8), Cond); 4357 } 4358 4359 assert(isFP && "Illegal integer SetCC!"); 4360 4361 Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); 4362 switch (SetCCOpcode) { 4363 default: assert(false && "Illegal floating point SetCC!"); 4364 case ISD::SETOEQ: { // !PF & ZF 4365 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4366 DAG.getConstant(X86::COND_NP, MVT::i8), Cond); 4367 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4368 DAG.getConstant(X86::COND_E, MVT::i8), Cond); 4369 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 4370 } 4371 case ISD::SETUNE: { // PF | !ZF 4372 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4373 DAG.getConstant(X86::COND_P, MVT::i8), Cond); 4374 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, 4375 DAG.getConstant(X86::COND_NE, MVT::i8), Cond); 4376 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 4377 } 4378 } 4379} 4380 4381 4382SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 4383 bool addTest = true; 4384 SDOperand Cond = Op.getOperand(0); 4385 SDOperand CC; 4386 4387 if (Cond.getOpcode() == ISD::SETCC) 4388 Cond = LowerSETCC(Cond, DAG); 4389 4390 // If condition flag is set by a X86ISD::CMP, then use it as the condition 4391 // setting operand in place of the X86ISD::SETCC. 4392 if (Cond.getOpcode() == X86ISD::SETCC) { 4393 CC = Cond.getOperand(0); 4394 4395 SDOperand Cmp = Cond.getOperand(1); 4396 unsigned Opc = Cmp.getOpcode(); 4397 MVT::ValueType VT = Op.getValueType(); 4398 4399 bool IllegalFPCMov = false; 4400 if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) && 4401 !isScalarFPTypeInSSEReg(VT)) // FPStack? 4402 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 4403 4404 if ((Opc == X86ISD::CMP || 4405 Opc == X86ISD::COMI || 4406 Opc == X86ISD::UCOMI) && !IllegalFPCMov) { 4407 Cond = Cmp; 4408 addTest = false; 4409 } 4410 } 4411 4412 if (addTest) { 4413 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 4414 Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); 4415 } 4416 4417 const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(), 4418 MVT::Flag); 4419 SmallVector<SDOperand, 4> Ops; 4420 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 4421 // condition is true. 
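  // Hence the SELECT's false value (operand 2) is pushed first and its true
  // value (operand 1) second.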
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDOperand Chain = Op.getOperand(0);
  SDOperand Cond  = Op.getOperand(1);
  SDOperand Dest  = Op.getOperand(2);
  SDOperand CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by an X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDOperand Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP ||
        Opc == X86ISD::COMI ||
        Opc == X86ISD::UCOMI) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDOperand
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
                                           SelectionDAG &DAG) {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");

  // Get the inputs.
  SDOperand Chain = Op.getOperand(0);
  SDOperand Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDOperand Flag;

  MVT::ValueType IntPtr = getPointerTy();
  MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;

  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain,
                      DAG.getTargetExternalSymbol("_alloca", IntPtr),
                      DAG.getRegister(X86::EAX, IntPtr),
                      Flag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(SPTy);
  Tys.push_back(MVT::Other);
  SDOperand Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}

SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned or size is more than the threshold, call memset.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
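  // (The threshold check matters because rep;stos has non-trivial startup
  // cost: for large or misaligned blocks libc can, e.g., pick a loop tuned
  // to the actual cache size and CPU features at run time.)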
4515 if ((Align & 3) != 0 || 4516 (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) { 4517 MVT::ValueType IntPtr = getPointerTy(); 4518 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 4519 TargetLowering::ArgListTy Args; 4520 TargetLowering::ArgListEntry Entry; 4521 Entry.Node = Op.getOperand(1); 4522 Entry.Ty = IntPtrTy; 4523 Args.push_back(Entry); 4524 // Extend the unsigned i8 argument to be an int value for the call. 4525 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 4526 Entry.Ty = IntPtrTy; 4527 Args.push_back(Entry); 4528 Entry.Node = Op.getOperand(3); 4529 Args.push_back(Entry); 4530 std::pair<SDOperand,SDOperand> CallResult = 4531 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 4532 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 4533 return CallResult.second; 4534 } 4535 4536 MVT::ValueType AVT; 4537 SDOperand Count; 4538 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4539 unsigned BytesLeft = 0; 4540 bool TwoRepStos = false; 4541 if (ValC) { 4542 unsigned ValReg; 4543 uint64_t Val = ValC->getValue() & 255; 4544 4545 // If the value is a constant, then we can potentially use larger sets. 4546 switch (Align & 3) { 4547 case 2: // WORD aligned 4548 AVT = MVT::i16; 4549 ValReg = X86::AX; 4550 Val = (Val << 8) | Val; 4551 break; 4552 case 0: // DWORD aligned 4553 AVT = MVT::i32; 4554 ValReg = X86::EAX; 4555 Val = (Val << 8) | Val; 4556 Val = (Val << 16) | Val; 4557 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 4558 AVT = MVT::i64; 4559 ValReg = X86::RAX; 4560 Val = (Val << 32) | Val; 4561 } 4562 break; 4563 default: // Byte aligned 4564 AVT = MVT::i8; 4565 ValReg = X86::AL; 4566 Count = Op.getOperand(3); 4567 break; 4568 } 4569 4570 if (AVT > MVT::i8) { 4571 if (I) { 4572 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4573 Count = DAG.getIntPtrConstant(I->getValue() / UBytes); 4574 BytesLeft = I->getValue() % UBytes; 4575 } else { 4576 assert(AVT >= MVT::i32 && 4577 "Do not use rep;stos if not at least DWORD aligned"); 4578 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 4579 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 4580 TwoRepStos = true; 4581 } 4582 } 4583 4584 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 4585 InFlag); 4586 InFlag = Chain.getValue(1); 4587 } else { 4588 AVT = MVT::i8; 4589 Count = Op.getOperand(3); 4590 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 4591 InFlag = Chain.getValue(1); 4592 } 4593 4594 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4595 Count, InFlag); 4596 InFlag = Chain.getValue(1); 4597 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4598 Op.getOperand(1), InFlag); 4599 InFlag = Chain.getValue(1); 4600 4601 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4602 SmallVector<SDOperand, 8> Ops; 4603 Ops.push_back(Chain); 4604 Ops.push_back(DAG.getValueType(AVT)); 4605 Ops.push_back(InFlag); 4606 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4607 4608 if (TwoRepStos) { 4609 InFlag = Chain.getValue(1); 4610 Count = Op.getOperand(3); 4611 MVT::ValueType CVT = Count.getValueType(); 4612 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 4613 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 4614 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, 4615 Left, InFlag); 4616 InFlag = Chain.getValue(1); 4617 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4618 Ops.clear(); 4619 Ops.push_back(Chain); 4620 Ops.push_back(DAG.getValueType(MVT::i8)); 4621 Ops.push_back(InFlag); 4622 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 4623 } else if (BytesLeft) { 4624 // Issue stores for the last 1 - 7 bytes. 4625 SDOperand Value; 4626 unsigned Val = ValC->getValue() & 255; 4627 unsigned Offset = I->getValue() - BytesLeft; 4628 SDOperand DstAddr = Op.getOperand(1); 4629 MVT::ValueType AddrVT = DstAddr.getValueType(); 4630 if (BytesLeft >= 4) { 4631 Val = (Val << 8) | Val; 4632 Val = (Val << 16) | Val; 4633 Value = DAG.getConstant(Val, MVT::i32); 4634 Chain = DAG.getStore(Chain, Value, 4635 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4636 DAG.getConstant(Offset, AddrVT)), 4637 NULL, 0); 4638 BytesLeft -= 4; 4639 Offset += 4; 4640 } 4641 if (BytesLeft >= 2) { 4642 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 4643 Chain = DAG.getStore(Chain, Value, 4644 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4645 DAG.getConstant(Offset, AddrVT)), 4646 NULL, 0); 4647 BytesLeft -= 2; 4648 Offset += 2; 4649 } 4650 if (BytesLeft == 1) { 4651 Value = DAG.getConstant(Val, MVT::i8); 4652 Chain = DAG.getStore(Chain, Value, 4653 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 4654 DAG.getConstant(Offset, AddrVT)), 4655 NULL, 0); 4656 } 4657 } 4658 4659 return Chain; 4660} 4661 4662SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain, 4663 SDOperand Dest, 4664 SDOperand Source, 4665 unsigned Size, 4666 unsigned Align, 4667 SelectionDAG &DAG) { 4668 MVT::ValueType AVT; 4669 unsigned BytesLeft = 0; 4670 switch (Align & 3) { 4671 case 2: // WORD aligned 4672 AVT = MVT::i16; 4673 break; 4674 case 0: // DWORD aligned 4675 AVT = MVT::i32; 4676 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 4677 AVT = MVT::i64; 4678 break; 4679 default: // Byte aligned 4680 AVT = MVT::i8; 4681 break; 4682 } 4683 4684 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 4685 SDOperand Count = DAG.getIntPtrConstant(Size / UBytes); 4686 BytesLeft = Size % UBytes; 4687 4688 SDOperand InFlag(0, 0); 4689 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 4690 Count, InFlag); 4691 InFlag = Chain.getValue(1); 4692 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 4693 Dest, InFlag); 4694 InFlag = Chain.getValue(1); 4695 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI, 4696 Source, InFlag); 4697 InFlag = Chain.getValue(1); 4698 4699 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4700 SmallVector<SDOperand, 8> Ops; 4701 Ops.push_back(Chain); 4702 Ops.push_back(DAG.getValueType(AVT)); 4703 Ops.push_back(InFlag); 4704 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 4705 4706 if (BytesLeft) { 4707 // Issue loads and stores for the last 1 - 7 bytes. 
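    // For example, a 15-byte DWORD-aligned copy runs rep;movsd with a count
    // of 3 (bytes 0-11), then copies an i16 at offset 12 and an i8 at
    // offset 14.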
4708 unsigned Offset = Size - BytesLeft; 4709 SDOperand DstAddr = Dest; 4710 MVT::ValueType DstVT = DstAddr.getValueType(); 4711 SDOperand SrcAddr = Source; 4712 MVT::ValueType SrcVT = SrcAddr.getValueType(); 4713 SDOperand Value; 4714 if (BytesLeft >= 4) { 4715 Value = DAG.getLoad(MVT::i32, Chain, 4716 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4717 DAG.getConstant(Offset, SrcVT)), 4718 NULL, 0); 4719 Chain = Value.getValue(1); 4720 Chain = DAG.getStore(Chain, Value, 4721 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4722 DAG.getConstant(Offset, DstVT)), 4723 NULL, 0); 4724 BytesLeft -= 4; 4725 Offset += 4; 4726 } 4727 if (BytesLeft >= 2) { 4728 Value = DAG.getLoad(MVT::i16, Chain, 4729 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4730 DAG.getConstant(Offset, SrcVT)), 4731 NULL, 0); 4732 Chain = Value.getValue(1); 4733 Chain = DAG.getStore(Chain, Value, 4734 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4735 DAG.getConstant(Offset, DstVT)), 4736 NULL, 0); 4737 BytesLeft -= 2; 4738 Offset += 2; 4739 } 4740 4741 if (BytesLeft == 1) { 4742 Value = DAG.getLoad(MVT::i8, Chain, 4743 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 4744 DAG.getConstant(Offset, SrcVT)), 4745 NULL, 0); 4746 Chain = Value.getValue(1); 4747 Chain = DAG.getStore(Chain, Value, 4748 DAG.getNode(ISD::ADD, DstVT, DstAddr, 4749 DAG.getConstant(Offset, DstVT)), 4750 NULL, 0); 4751 } 4752 } 4753 4754 return Chain; 4755} 4756 4757/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain 4758SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ 4759 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 4760 SDOperand TheChain = N->getOperand(0); 4761 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); 4762 if (Subtarget->is64Bit()) { 4763 SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 4764 SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, 4765 MVT::i64, rax.getValue(2)); 4766 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, 4767 DAG.getConstant(32, MVT::i8)); 4768 SDOperand Ops[] = { 4769 DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) 4770 }; 4771 4772 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4773 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4774 } 4775 4776 SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 4777 SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, 4778 MVT::i32, eax.getValue(2)); 4779 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 4780 SDOperand Ops[] = { eax, edx }; 4781 Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); 4782 4783 // Use a MERGE_VALUES to return the value and chain. 4784 Ops[1] = edx.getValue(1); 4785 Tys = DAG.getVTList(MVT::i64, MVT::Other); 4786 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val; 4787} 4788 4789SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 4790 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4791 4792 if (!Subtarget->is64Bit()) { 4793 // vastart just stores the address of the VarArgsFrameIndex slot into the 4794 // memory location argument. 4795 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4796 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); 4797 } 4798 4799 // __va_list_tag: 4800 // gp_offset (0 - 6 * 8) 4801 // fp_offset (48 - 48 + 8 * 16) 4802 // overflow_arg_area (point to parameters coming in memory). 
4803 // reg_save_area 4804 SmallVector<SDOperand, 8> MemOps; 4805 SDOperand FIN = Op.getOperand(1); 4806 // Store gp_offset 4807 SDOperand Store = DAG.getStore(Op.getOperand(0), 4808 DAG.getConstant(VarArgsGPOffset, MVT::i32), 4809 FIN, SV, 0); 4810 MemOps.push_back(Store); 4811 4812 // Store fp_offset 4813 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4814 Store = DAG.getStore(Op.getOperand(0), 4815 DAG.getConstant(VarArgsFPOffset, MVT::i32), 4816 FIN, SV, 0); 4817 MemOps.push_back(Store); 4818 4819 // Store ptr to overflow_arg_area 4820 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); 4821 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 4822 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); 4823 MemOps.push_back(Store); 4824 4825 // Store ptr to reg_save_area. 4826 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); 4827 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 4828 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); 4829 MemOps.push_back(Store); 4830 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 4831} 4832 4833SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 4834 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 4835 SDOperand Chain = Op.getOperand(0); 4836 SDOperand DstPtr = Op.getOperand(1); 4837 SDOperand SrcPtr = Op.getOperand(2); 4838 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 4839 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 4840 4841 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, SrcSV, 0); 4842 Chain = SrcPtr.getValue(1); 4843 for (unsigned i = 0; i < 3; ++i) { 4844 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, SrcSV, 0); 4845 Chain = Val.getValue(1); 4846 Chain = DAG.getStore(Chain, Val, DstPtr, DstSV, 0); 4847 if (i == 2) 4848 break; 4849 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 4850 DAG.getIntPtrConstant(8)); 4851 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 4852 DAG.getIntPtrConstant(8)); 4853 } 4854 return Chain; 4855} 4856 4857SDOperand 4858X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4859 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4860 switch (IntNo) { 4861 default: return SDOperand(); // Don't custom lower most intrinsics. 4862 // Comparison intrinsics. 
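  // Both the comi* and ucomi* forms compare the low scalar elements and set
  // EFLAGS; the ordered comi* forms additionally signal an invalid-operation
  // exception on quiet NaN operands, while the ucomi* forms stay quiet.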
4863 case Intrinsic::x86_sse_comieq_ss: 4864 case Intrinsic::x86_sse_comilt_ss: 4865 case Intrinsic::x86_sse_comile_ss: 4866 case Intrinsic::x86_sse_comigt_ss: 4867 case Intrinsic::x86_sse_comige_ss: 4868 case Intrinsic::x86_sse_comineq_ss: 4869 case Intrinsic::x86_sse_ucomieq_ss: 4870 case Intrinsic::x86_sse_ucomilt_ss: 4871 case Intrinsic::x86_sse_ucomile_ss: 4872 case Intrinsic::x86_sse_ucomigt_ss: 4873 case Intrinsic::x86_sse_ucomige_ss: 4874 case Intrinsic::x86_sse_ucomineq_ss: 4875 case Intrinsic::x86_sse2_comieq_sd: 4876 case Intrinsic::x86_sse2_comilt_sd: 4877 case Intrinsic::x86_sse2_comile_sd: 4878 case Intrinsic::x86_sse2_comigt_sd: 4879 case Intrinsic::x86_sse2_comige_sd: 4880 case Intrinsic::x86_sse2_comineq_sd: 4881 case Intrinsic::x86_sse2_ucomieq_sd: 4882 case Intrinsic::x86_sse2_ucomilt_sd: 4883 case Intrinsic::x86_sse2_ucomile_sd: 4884 case Intrinsic::x86_sse2_ucomigt_sd: 4885 case Intrinsic::x86_sse2_ucomige_sd: 4886 case Intrinsic::x86_sse2_ucomineq_sd: { 4887 unsigned Opc = 0; 4888 ISD::CondCode CC = ISD::SETCC_INVALID; 4889 switch (IntNo) { 4890 default: break; 4891 case Intrinsic::x86_sse_comieq_ss: 4892 case Intrinsic::x86_sse2_comieq_sd: 4893 Opc = X86ISD::COMI; 4894 CC = ISD::SETEQ; 4895 break; 4896 case Intrinsic::x86_sse_comilt_ss: 4897 case Intrinsic::x86_sse2_comilt_sd: 4898 Opc = X86ISD::COMI; 4899 CC = ISD::SETLT; 4900 break; 4901 case Intrinsic::x86_sse_comile_ss: 4902 case Intrinsic::x86_sse2_comile_sd: 4903 Opc = X86ISD::COMI; 4904 CC = ISD::SETLE; 4905 break; 4906 case Intrinsic::x86_sse_comigt_ss: 4907 case Intrinsic::x86_sse2_comigt_sd: 4908 Opc = X86ISD::COMI; 4909 CC = ISD::SETGT; 4910 break; 4911 case Intrinsic::x86_sse_comige_ss: 4912 case Intrinsic::x86_sse2_comige_sd: 4913 Opc = X86ISD::COMI; 4914 CC = ISD::SETGE; 4915 break; 4916 case Intrinsic::x86_sse_comineq_ss: 4917 case Intrinsic::x86_sse2_comineq_sd: 4918 Opc = X86ISD::COMI; 4919 CC = ISD::SETNE; 4920 break; 4921 case Intrinsic::x86_sse_ucomieq_ss: 4922 case Intrinsic::x86_sse2_ucomieq_sd: 4923 Opc = X86ISD::UCOMI; 4924 CC = ISD::SETEQ; 4925 break; 4926 case Intrinsic::x86_sse_ucomilt_ss: 4927 case Intrinsic::x86_sse2_ucomilt_sd: 4928 Opc = X86ISD::UCOMI; 4929 CC = ISD::SETLT; 4930 break; 4931 case Intrinsic::x86_sse_ucomile_ss: 4932 case Intrinsic::x86_sse2_ucomile_sd: 4933 Opc = X86ISD::UCOMI; 4934 CC = ISD::SETLE; 4935 break; 4936 case Intrinsic::x86_sse_ucomigt_ss: 4937 case Intrinsic::x86_sse2_ucomigt_sd: 4938 Opc = X86ISD::UCOMI; 4939 CC = ISD::SETGT; 4940 break; 4941 case Intrinsic::x86_sse_ucomige_ss: 4942 case Intrinsic::x86_sse2_ucomige_sd: 4943 Opc = X86ISD::UCOMI; 4944 CC = ISD::SETGE; 4945 break; 4946 case Intrinsic::x86_sse_ucomineq_ss: 4947 case Intrinsic::x86_sse2_ucomineq_sd: 4948 Opc = X86ISD::UCOMI; 4949 CC = ISD::SETNE; 4950 break; 4951 } 4952 4953 unsigned X86CC; 4954 SDOperand LHS = Op.getOperand(1); 4955 SDOperand RHS = Op.getOperand(2); 4956 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4957 4958 SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); 4959 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, 4960 DAG.getConstant(X86CC, MVT::i8), Cond); 4961 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4962 } 4963 } 4964} 4965 4966SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4967 // Depths > 0 not supported yet! 
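  // That is, only __builtin_return_address(0) is handled; walking to parent
  // frames would require chasing the saved-EBP chain, which is not always
  // present.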
4968 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4969 return SDOperand(); 4970 4971 // Just load the return address 4972 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4973 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4974} 4975 4976SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4977 // Depths > 0 not supported yet! 4978 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4979 return SDOperand(); 4980 4981 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4982 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4983 DAG.getIntPtrConstant(4)); 4984} 4985 4986SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, 4987 SelectionDAG &DAG) { 4988 // Is not yet supported on x86-64 4989 if (Subtarget->is64Bit()) 4990 return SDOperand(); 4991 4992 return DAG.getIntPtrConstant(8); 4993} 4994 4995SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) 4996{ 4997 assert(!Subtarget->is64Bit() && 4998 "Lowering of eh_return builtin is not supported yet on x86-64"); 4999 5000 MachineFunction &MF = DAG.getMachineFunction(); 5001 SDOperand Chain = Op.getOperand(0); 5002 SDOperand Offset = Op.getOperand(1); 5003 SDOperand Handler = Op.getOperand(2); 5004 5005 SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), 5006 getPointerTy()); 5007 5008 SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, 5009 DAG.getIntPtrConstant(-4UL)); 5010 StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); 5011 Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); 5012 Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr); 5013 MF.getRegInfo().addLiveOut(X86::ECX); 5014 5015 return DAG.getNode(X86ISD::EH_RETURN, MVT::Other, 5016 Chain, DAG.getRegister(X86::ECX, getPointerTy())); 5017} 5018 5019SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, 5020 SelectionDAG &DAG) { 5021 SDOperand Root = Op.getOperand(0); 5022 SDOperand Trmp = Op.getOperand(1); // trampoline 5023 SDOperand FPtr = Op.getOperand(2); // nested function 5024 SDOperand Nest = Op.getOperand(3); // 'nest' parameter value 5025 5026 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 5027 5028 const X86InstrInfo *TII = 5029 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 5030 5031 if (Subtarget->is64Bit()) { 5032 SDOperand OutChains[6]; 5033 5034 // Large code-model. 5035 5036 const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r); 5037 const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri); 5038 5039 const unsigned char N86R10 = 5040 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10); 5041 const unsigned char N86R11 = 5042 ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11); 5043 5044 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 5045 5046 // Load the pointer to the nested function into R11. 5047 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 5048 SDOperand Addr = Trmp; 5049 OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5050 TrmpAddr, 0); 5051 5052 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64)); 5053 OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2); 5054 5055 // Load the 'nest' parameter value into R10. 
5056 // R10 is specified in X86CallingConv.td 5057 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 5058 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64)); 5059 OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5060 TrmpAddr, 10); 5061 5062 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64)); 5063 OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2); 5064 5065 // Jump to the nested function. 5066 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 5067 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64)); 5068 OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, 5069 TrmpAddr, 20); 5070 5071 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 5072 Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64)); 5073 OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, 5074 TrmpAddr, 22); 5075 5076 SDOperand Ops[] = 5077 { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; 5078 return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2); 5079 } else { 5080 const Function *Func = 5081 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 5082 unsigned CC = Func->getCallingConv(); 5083 unsigned NestReg; 5084 5085 switch (CC) { 5086 default: 5087 assert(0 && "Unsupported calling convention"); 5088 case CallingConv::C: 5089 case CallingConv::X86_StdCall: { 5090 // Pass 'nest' parameter in ECX. 5091 // Must be kept in sync with X86CallingConv.td 5092 NestReg = X86::ECX; 5093 5094 // Check that ECX wasn't needed by an 'inreg' parameter. 5095 const FunctionType *FTy = Func->getFunctionType(); 5096 const ParamAttrsList *Attrs = Func->getParamAttrs(); 5097 5098 if (Attrs && !Func->isVarArg()) { 5099 unsigned InRegCount = 0; 5100 unsigned Idx = 1; 5101 5102 for (FunctionType::param_iterator I = FTy->param_begin(), 5103 E = FTy->param_end(); I != E; ++I, ++Idx) 5104 if (Attrs->paramHasAttr(Idx, ParamAttr::InReg)) 5105 // FIXME: should only count parameters that are lowered to integers. 5106 InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32; 5107 5108 if (InRegCount > 2) { 5109 cerr << "Nest register in use - reduce number of inreg parameters!\n"; 5110 abort(); 5111 } 5112 } 5113 break; 5114 } 5115 case CallingConv::X86_FastCall: 5116 // Pass 'nest' parameter in EAX. 
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDOperand OutChains[4];
    SDOperand Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);

    const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
    const unsigned char N86Reg =
      ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
    OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, TrmpAddr, 0);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);

    const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                                TrmpAddr, 5, false, 1);

    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);

    SDOperand Ops[] =
      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
  }
}

SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
  /*
   The rounding mode is in bits 11:10 of the x87 FP control word (the word
   FNSTCW stores below; it is not in the status word), and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameInfo &TFI = *TM.getFrameInfo();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT::ValueType VT = Op.getValueType();

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
                                DAG.getEntryNode(), StackSlot);

  // Load FP Control Word from stack slot
  SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);

  // Transform the rounding-control bits as described in the comment above.
  SDOperand CWD1 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDOperand CWD2 =
    DAG.getNode(ISD::SRL, MVT::i16,
                DAG.getNode(ISD::AND, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDOperand RetVal =
    DAG.getNode(ISD::AND, MVT::i16,
                DAG.getNode(ISD::ADD, MVT::i16,
                            DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}

SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType OpVT = VT;
  unsigned NumBits = MVT::getSizeInBits(VT);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), select 2*NumBits-1 instead; the xor
  // below then turns that into NumBits.
  SmallVector<SDOperand, 4> Ops;
  Ops.push_back(Op);
  Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
  Ops.push_back(Op.getValue(1));
  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
  return Op;
}

SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType OpVT = VT;
  unsigned NumBits = MVT::getSizeInBits(VT);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
  }

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), select NumBits instead.
  SmallVector<SDOperand, 4> Ops;
  Ops.push_back(Op);
  Ops.push_back(DAG.getConstant(NumBits, OpVT));
  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
  Ops.push_back(Op.getValue(1));
  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
  return Op;
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::FABS:               return LowerFABS(Op, DAG);
  case ISD::FNEG:               return LowerFNEG(Op, DAG);
  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:               return LowerCTLZ(Op, DAG);
  case ISD::CTTZ:               return LowerCTTZ(Op, DAG);

  // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
  case ISD::READCYCLECOUNTER:
    return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
  }
}

/// ExpandOperationResult - Provide custom lowering hooks for expanding
/// operations.
SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::FP_TO_SINT:       return ExpandFP_TO_SINT(N, DAG);
  case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::BSF:                return "X86ISD::BSF";
  case X86ISD::BSR:                return "X86ISD::BSR";
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FSRL:               return "X86ISD::FSRL";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_GET_RESULT2:     return "X86ISD::FP_GET_RESULT2";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::THREAD_POINTER:     return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (AM.BaseOffs < -(1LL << 31) || AM.BaseOffs >= (1LL << 31))
    return false;

  if (AM.BaseGV) {
    // We can only fold this if we don't need an extra load.
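    // (In PIC mode, for example, the global's address must itself be loaded,
    // from the GOT or a Darwin non-lazy pointer, so it cannot be folded into
    // the displacement field.)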
5396 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 5397 return false; 5398 5399 // X86-64 only supports addr of globals in small code model. 5400 if (Subtarget->is64Bit()) { 5401 if (getTargetMachine().getCodeModel() != CodeModel::Small) 5402 return false; 5403 // If lower 4G is not available, then we must use rip-relative addressing. 5404 if (AM.BaseOffs || AM.Scale > 1) 5405 return false; 5406 } 5407 } 5408 5409 switch (AM.Scale) { 5410 case 0: 5411 case 1: 5412 case 2: 5413 case 4: 5414 case 8: 5415 // These scales always work. 5416 break; 5417 case 3: 5418 case 5: 5419 case 9: 5420 // These scales are formed with basereg+scalereg. Only accept if there is 5421 // no basereg yet. 5422 if (AM.HasBaseReg) 5423 return false; 5424 break; 5425 default: // Other stuff never works. 5426 return false; 5427 } 5428 5429 return true; 5430} 5431 5432 5433bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 5434 if (!Ty1->isInteger() || !Ty2->isInteger()) 5435 return false; 5436 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 5437 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 5438 if (NumBits1 <= NumBits2) 5439 return false; 5440 return Subtarget->is64Bit() || NumBits1 < 64; 5441} 5442 5443bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1, 5444 MVT::ValueType VT2) const { 5445 if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2)) 5446 return false; 5447 unsigned NumBits1 = MVT::getSizeInBits(VT1); 5448 unsigned NumBits2 = MVT::getSizeInBits(VT2); 5449 if (NumBits1 <= NumBits2) 5450 return false; 5451 return Subtarget->is64Bit() || NumBits1 < 64; 5452} 5453 5454/// isShuffleMaskLegal - Targets can use this to indicate that they only 5455/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5456/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5457/// are assumed to be legal. 5458bool 5459X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 5460 // Only do shuffles on 128-bit vector types for now. 5461 if (MVT::getSizeInBits(VT) == 64) return false; 5462 return (Mask.Val->getNumOperands() <= 4 || 5463 isIdentityMask(Mask.Val) || 5464 isIdentityMask(Mask.Val, true) || 5465 isSplatMask(Mask.Val) || 5466 isPSHUFHW_PSHUFLWMask(Mask.Val) || 5467 X86::isUNPCKLMask(Mask.Val) || 5468 X86::isUNPCKHMask(Mask.Val) || 5469 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 5470 X86::isUNPCKH_v_undef_Mask(Mask.Val)); 5471} 5472 5473bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 5474 MVT::ValueType EVT, 5475 SelectionDAG &DAG) const { 5476 unsigned NumElts = BVOps.size(); 5477 // Only do shuffles on 128-bit vector types for now. 
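  // A "clear" here is an AND with a build_vector of zeros and all-ones
  // elements; it is only worth turning into a shuffle with a zero vector
  // when the equivalent mask is one we can match, e.g. MOVL or SHUFP.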
5478 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 5479 if (NumElts == 2) return true; 5480 if (NumElts == 4) { 5481 return (isMOVLMask(&BVOps[0], 4) || 5482 isCommutedMOVL(&BVOps[0], 4, true) || 5483 isSHUFPMask(&BVOps[0], 4) || 5484 isCommutedSHUFP(&BVOps[0], 4)); 5485 } 5486 return false; 5487} 5488 5489//===----------------------------------------------------------------------===// 5490// X86 Scheduler Hooks 5491//===----------------------------------------------------------------------===// 5492 5493MachineBasicBlock * 5494X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5495 MachineBasicBlock *BB) { 5496 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5497 switch (MI->getOpcode()) { 5498 default: assert(false && "Unexpected instr type to insert"); 5499 case X86::CMOV_FR32: 5500 case X86::CMOV_FR64: 5501 case X86::CMOV_V4F32: 5502 case X86::CMOV_V2F64: 5503 case X86::CMOV_V2I64: { 5504 // To "insert" a SELECT_CC instruction, we actually have to insert the 5505 // diamond control-flow pattern. The incoming instruction knows the 5506 // destination vreg to set, the condition code register to branch on, the 5507 // true/false values to select between, and a branch opcode to use. 5508 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5509 ilist<MachineBasicBlock>::iterator It = BB; 5510 ++It; 5511 5512 // thisMBB: 5513 // ... 5514 // TrueVal = ... 5515 // cmpTY ccX, r1, r2 5516 // bCC copy1MBB 5517 // fallthrough --> copy0MBB 5518 MachineBasicBlock *thisMBB = BB; 5519 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 5520 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 5521 unsigned Opc = 5522 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 5523 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 5524 MachineFunction *F = BB->getParent(); 5525 F->getBasicBlockList().insert(It, copy0MBB); 5526 F->getBasicBlockList().insert(It, sinkMBB); 5527 // Update machine-CFG edges by first adding all successors of the current 5528 // block to the new block which will contain the Phi node for the select. 5529 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 5530 e = BB->succ_end(); i != e; ++i) 5531 sinkMBB->addSuccessor(*i); 5532 // Next, remove all successors of the current block, and add the true 5533 // and fallthrough blocks as its successors. 5534 while(!BB->succ_empty()) 5535 BB->removeSuccessor(BB->succ_begin()); 5536 BB->addSuccessor(copy0MBB); 5537 BB->addSuccessor(sinkMBB); 5538 5539 // copy0MBB: 5540 // %FalseValue = ... 5541 // # fallthrough to sinkMBB 5542 BB = copy0MBB; 5543 5544 // Update machine-CFG edges 5545 BB->addSuccessor(sinkMBB); 5546 5547 // sinkMBB: 5548 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5549 // ... 5550 BB = sinkMBB; 5551 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 5552 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5553 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5554 5555 delete MI; // The pseudo instruction is gone now. 5556 return BB; 5557 } 5558 5559 case X86::FP32_TO_INT16_IN_MEM: 5560 case X86::FP32_TO_INT32_IN_MEM: 5561 case X86::FP32_TO_INT64_IN_MEM: 5562 case X86::FP64_TO_INT16_IN_MEM: 5563 case X86::FP64_TO_INT32_IN_MEM: 5564 case X86::FP64_TO_INT64_IN_MEM: 5565 case X86::FP80_TO_INT16_IN_MEM: 5566 case X86::FP80_TO_INT32_IN_MEM: 5567 case X86::FP80_TO_INT64_IN_MEM: { 5568 // Change the floating point control register to use "round towards zero" 5569 // mode when truncating to an integer value. 
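    // This is the usual fistp idiom: save the FP control word, store a
    // control word (0xC7F) whose rounding-control bits 11:10 are 11 (round
    // toward zero), reload it, convert to memory, then restore the original.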
5570 MachineFunction *F = BB->getParent(); 5571 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 5572 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 5573 5574 // Load the old value of the high byte of the control word... 5575 unsigned OldCW = 5576 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 5577 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 5578 5579 // Set the high part to be round to zero... 5580 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 5581 .addImm(0xC7F); 5582 5583 // Reload the modified control word now... 5584 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5585 5586 // Restore the memory image of control word to original value 5587 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 5588 .addReg(OldCW); 5589 5590 // Get the X86 opcode to use. 5591 unsigned Opc; 5592 switch (MI->getOpcode()) { 5593 default: assert(0 && "illegal opcode!"); 5594 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 5595 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 5596 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 5597 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 5598 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 5599 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 5600 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 5601 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 5602 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 5603 } 5604 5605 X86AddressMode AM; 5606 MachineOperand &Op = MI->getOperand(0); 5607 if (Op.isRegister()) { 5608 AM.BaseType = X86AddressMode::RegBase; 5609 AM.Base.Reg = Op.getReg(); 5610 } else { 5611 AM.BaseType = X86AddressMode::FrameIndexBase; 5612 AM.Base.FrameIndex = Op.getIndex(); 5613 } 5614 Op = MI->getOperand(1); 5615 if (Op.isImmediate()) 5616 AM.Scale = Op.getImm(); 5617 Op = MI->getOperand(2); 5618 if (Op.isImmediate()) 5619 AM.IndexReg = Op.getImm(); 5620 Op = MI->getOperand(3); 5621 if (Op.isGlobalAddress()) { 5622 AM.GV = Op.getGlobal(); 5623 } else { 5624 AM.Disp = Op.getImm(); 5625 } 5626 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 5627 .addReg(MI->getOperand(4).getReg()); 5628 5629 // Reload the original control word now. 5630 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 5631 5632 delete MI; // The pseudo instruction is gone now. 5633 return BB; 5634 } 5635 } 5636} 5637 5638//===----------------------------------------------------------------------===// 5639// X86 Optimization Hooks 5640//===----------------------------------------------------------------------===// 5641 5642void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 5643 uint64_t Mask, 5644 uint64_t &KnownZero, 5645 uint64_t &KnownOne, 5646 const SelectionDAG &DAG, 5647 unsigned Depth) const { 5648 unsigned Opc = Op.getOpcode(); 5649 assert((Opc >= ISD::BUILTIN_OP_END || 5650 Opc == ISD::INTRINSIC_WO_CHAIN || 5651 Opc == ISD::INTRINSIC_W_CHAIN || 5652 Opc == ISD::INTRINSIC_VOID) && 5653 "Should use MaskedValueIsZero if you don't know whether Op" 5654 " is a target node!"); 5655 5656 KnownZero = KnownOne = 0; // Don't know anything. 
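  // An X86ISD::SETCC node materializes exactly 0 or 1 in an i8, so every bit
  // above bit 0 is known zero; that is the only fact reported below.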
5657 switch (Opc) { 5658 default: break; 5659 case X86ISD::SETCC: 5660 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 5661 break; 5662 } 5663} 5664 5665/// getShuffleScalarElt - Returns the scalar element that will make up the ith 5666/// element of the result of the vector shuffle. 5667static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 5668 MVT::ValueType VT = N->getValueType(0); 5669 SDOperand PermMask = N->getOperand(2); 5670 unsigned NumElems = PermMask.getNumOperands(); 5671 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 5672 i %= NumElems; 5673 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5674 return (i == 0) 5675 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5676 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 5677 SDOperand Idx = PermMask.getOperand(i); 5678 if (Idx.getOpcode() == ISD::UNDEF) 5679 return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT)); 5680 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 5681 } 5682 return SDOperand(); 5683} 5684 5685/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 5686/// node is a GlobalAddress + an offset. 5687static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 5688 unsigned Opc = N->getOpcode(); 5689 if (Opc == X86ISD::Wrapper) { 5690 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 5691 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 5692 return true; 5693 } 5694 } else if (Opc == ISD::ADD) { 5695 SDOperand N1 = N->getOperand(0); 5696 SDOperand N2 = N->getOperand(1); 5697 if (isGAPlusOffset(N1.Val, GA, Offset)) { 5698 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 5699 if (V) { 5700 Offset += V->getSignExtended(); 5701 return true; 5702 } 5703 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 5704 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 5705 if (V) { 5706 Offset += V->getSignExtended(); 5707 return true; 5708 } 5709 } 5710 } 5711 return false; 5712} 5713 5714/// isConsecutiveLoad - Returns true if N is loading from an address of Base 5715/// + Dist * Size. 5716static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size, 5717 MachineFrameInfo *MFI) { 5718 if (N->getOperand(0).Val != Base->getOperand(0).Val) 5719 return false; 5720 5721 SDOperand Loc = N->getOperand(1); 5722 SDOperand BaseLoc = Base->getOperand(1); 5723 if (Loc.getOpcode() == ISD::FrameIndex) { 5724 if (BaseLoc.getOpcode() != ISD::FrameIndex) 5725 return false; 5726 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 5727 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 5728 int FS = MFI->getObjectSize(FI); 5729 int BFS = MFI->getObjectSize(BFI); 5730 if (FS != BFS || FS != Size) return false; 5731 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size); 5732 } else { 5733 GlobalValue *GV1 = NULL; 5734 GlobalValue *GV2 = NULL; 5735 int64_t Offset1 = 0; 5736 int64_t Offset2 = 0; 5737 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 5738 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 5739 if (isGA1 && isGA2 && GV1 == GV2) 5740 return Offset1 == (Offset2 + Dist*Size); 5741 } 5742 5743 return false; 5744} 5745 5746static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI, 5747 const X86Subtarget *Subtarget) { 5748 GlobalValue *GV; 5749 int64_t Offset = 0; 5750 if (isGAPlusOffset(Base, GV, Offset)) 5751 return (GV->getAlignment() >= 16 && (Offset % 16) == 0); 5752 // DAG combine handles the stack object case. 
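  // (Knowing the base is 16-byte aligned is what lets PerformShuffleCombine
  // below emit an aligned 128-bit load instead of an unaligned one.)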
5753 return false; 5754} 5755 5756 5757/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 5758/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 5759/// if the load addresses are consecutive, non-overlapping, and in the right 5760/// order. 5761static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 5762 const X86Subtarget *Subtarget) { 5763 MachineFunction &MF = DAG.getMachineFunction(); 5764 MachineFrameInfo *MFI = MF.getFrameInfo(); 5765 MVT::ValueType VT = N->getValueType(0); 5766 MVT::ValueType EVT = MVT::getVectorElementType(VT); 5767 SDOperand PermMask = N->getOperand(2); 5768 int NumElems = (int)PermMask.getNumOperands(); 5769 SDNode *Base = NULL; 5770 for (int i = 0; i < NumElems; ++i) { 5771 SDOperand Idx = PermMask.getOperand(i); 5772 if (Idx.getOpcode() == ISD::UNDEF) { 5773 if (!Base) return SDOperand(); 5774 } else { 5775 SDOperand Arg = 5776 getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG); 5777 if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val)) 5778 return SDOperand(); 5779 if (!Base) 5780 Base = Arg.Val; 5781 else if (!isConsecutiveLoad(Arg.Val, Base, 5782 i, MVT::getSizeInBits(EVT)/8,MFI)) 5783 return SDOperand(); 5784 } 5785 } 5786 5787 bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget); 5788 LoadSDNode *LD = cast<LoadSDNode>(Base); 5789 if (isAlign16) { 5790 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5791 LD->getSrcValueOffset(), LD->isVolatile()); 5792 } else { 5793 return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(), 5794 LD->getSrcValueOffset(), LD->isVolatile(), 5795 LD->getAlignment()); 5796 } 5797} 5798 5799/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 5800static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 5801 const X86Subtarget *Subtarget) { 5802 SDOperand Cond = N->getOperand(0); 5803 5804 // If we have SSE[12] support, try to form min/max nodes. 5805 if (Subtarget->hasSSE2() && 5806 (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { 5807 if (Cond.getOpcode() == ISD::SETCC) { 5808 // Get the LHS/RHS of the select. 5809 SDOperand LHS = N->getOperand(1); 5810 SDOperand RHS = N->getOperand(2); 5811 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 5812 5813 unsigned Opcode = 0; 5814 if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) { 5815 switch (CC) { 5816 default: break; 5817 case ISD::SETOLE: // (X <= Y) ? X : Y -> min 5818 case ISD::SETULE: 5819 case ISD::SETLE: 5820 if (!UnsafeFPMath) break; 5821 // FALL THROUGH. 5822 case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min 5823 case ISD::SETLT: 5824 Opcode = X86ISD::FMIN; 5825 break; 5826 5827 case ISD::SETOGT: // (X > Y) ? X : Y -> max 5828 case ISD::SETUGT: 5829 case ISD::SETGT: 5830 if (!UnsafeFPMath) break; 5831 // FALL THROUGH. 5832 case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max 5833 case ISD::SETGE: 5834 Opcode = X86ISD::FMAX; 5835 break; 5836 } 5837 } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) { 5838 switch (CC) { 5839 default: break; 5840 case ISD::SETOGT: // (X > Y) ? Y : X -> min 5841 case ISD::SETUGT: 5842 case ISD::SETGT: 5843 if (!UnsafeFPMath) break; 5844 // FALL THROUGH. 5845 case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min 5846 case ISD::SETGE: 5847 Opcode = X86ISD::FMIN; 5848 break; 5849 5850 case ISD::SETOLE: // (X <= Y) ? Y : X -> max 5851 case ISD::SETULE: 5852 case ISD::SETLE: 5853 if (!UnsafeFPMath) break; 5854 // FALL THROUGH. 
5855 case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max 5856 case ISD::SETLT: 5857 Opcode = X86ISD::FMAX; 5858 break; 5859 } 5860 } 5861 5862 if (Opcode) 5863 return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS); 5864 } 5865 5866 } 5867 5868 return SDOperand(); 5869} 5870 5871/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 5872/// X86ISD::FXOR nodes. 5873static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 5874 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 5875 // F[X]OR(0.0, x) -> x 5876 // F[X]OR(x, 0.0) -> x 5877 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5878 if (C->getValueAPF().isPosZero()) 5879 return N->getOperand(1); 5880 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5881 if (C->getValueAPF().isPosZero()) 5882 return N->getOperand(0); 5883 return SDOperand(); 5884} 5885 5886/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 5887static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 5888 // FAND(0.0, x) -> 0.0 5889 // FAND(x, 0.0) -> 0.0 5890 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 5891 if (C->getValueAPF().isPosZero()) 5892 return N->getOperand(0); 5893 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 5894 if (C->getValueAPF().isPosZero()) 5895 return N->getOperand(1); 5896 return SDOperand(); 5897} 5898 5899 5900SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, 5901 DAGCombinerInfo &DCI) const { 5902 SelectionDAG &DAG = DCI.DAG; 5903 switch (N->getOpcode()) { 5904 default: break; 5905 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget); 5906 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 5907 case X86ISD::FXOR: 5908 case X86ISD::FOR: return PerformFORCombine(N, DAG); 5909 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 5910 } 5911 5912 return SDOperand(); 5913} 5914 5915//===----------------------------------------------------------------------===// 5916// X86 Inline Assembly Support 5917//===----------------------------------------------------------------------===// 5918 5919/// getConstraintType - Given a constraint letter, return the type of 5920/// constraint it is for this target. 5921X86TargetLowering::ConstraintType 5922X86TargetLowering::getConstraintType(const std::string &Constraint) const { 5923 if (Constraint.size() == 1) { 5924 switch (Constraint[0]) { 5925 case 'A': 5926 case 'r': 5927 case 'R': 5928 case 'l': 5929 case 'q': 5930 case 'Q': 5931 case 'x': 5932 case 'Y': 5933 return C_RegisterClass; 5934 default: 5935 break; 5936 } 5937 } 5938 return TargetLowering::getConstraintType(Constraint); 5939} 5940 5941/// LowerXConstraint - try to replace an X constraint, which matches anything, 5942/// with another that has more specific requirements based on the type of the 5943/// corresponding operand. 5944void X86TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT, 5945 std::string& s) const { 5946 if (MVT::isFloatingPoint(ConstraintVT)) { 5947 if (Subtarget->hasSSE2()) 5948 s = "Y"; 5949 else if (Subtarget->hasSSE1()) 5950 s = "x"; 5951 else 5952 s = "f"; 5953 } else 5954 return TargetLowering::lowerXConstraint(ConstraintVT, s); 5955} 5956 5957/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5958/// vector. If it is invalid, don't add anything to Ops. 
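/// For example, 'I' only accepts immediates in [0,31] (shift counts) and 'N'
/// only immediates in [0,255] (unsigned 8-bit, e.g. in/out port numbers).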
void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                     char Constraint,
                                                     std::vector<SDOperand> &Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255) {
        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Also try (C+GA), with the constant on the LHS.
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return;

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
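    // Note that 'A' below is the EAX:EDX register pair, which GCC uses for
    // 64-bit values produced by instructions such as mul and div on x86-32.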
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.
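  // Example of the remapping performed below; the asm statement is
  // hypothetical, not code from this file.  For
  //
  //   int x;
  //   asm("" : "={ax}"(x));
  //
  // the default lookup returns X86::AX in GR16RegisterClass, but VT here is
  // i32, so the code below substitutes X86::EAX in GR32RegisterClass and the
  // operand occupies the full 32-bit register.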
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
  // register class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}
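// Illustrative note on the "{st}" special case above; the asm statement is
// hypothetical, not code from this file.  GCC names the x87 top-of-stack
// register plain "st" rather than "st(0)", so
//
//   double x;
//   asm("fsqrt" : "={st}"(x) : "0"(x));
//
// resolves through the StringsEqualNoCase("{st}", Constraint) path to
// X86::ST0 in RFP80RegisterClass.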