X86ISelLowering.cpp revision 07c70cd86629846a2584e7311964e9f7753c94b0
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
  } else {
    if (X86ScalarSSE)
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
    else
      setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
    setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
  }

  if (!Subtarget->is64Bit()) {
    // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
    setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
    setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);

  if (X86ScalarSSE) {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
  } else {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
    setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
  } else {
    if (X86ScalarSSE && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSE) {
    setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
    setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
  }

  setOperationAction(ISD::BR_JT , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
  setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f64 , Expand);

  setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
  setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
  setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
  setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
  setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
    setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
  setOperationAction(ISD::BSWAP , MVT::i16 , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT , MVT::i1 , Promote);
  setOperationAction(ISD::SELECT , MVT::i8 , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT , MVT::i16 , Custom);
  setOperationAction(ISD::SELECT , MVT::i32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f64 , Custom);
  setOperationAction(ISD::SETCC , MVT::i8 , Custom);
  setOperationAction(ISD::SETCC , MVT::i16 , Custom);
  setOperationAction(ISD::SETCC , MVT::i32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f64 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC , MVT::i64 , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET , MVT::Other, Custom);
  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY , MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing())
    setOperationAction(ISD::LABEL, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    // FIXME: Verify
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8, Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8, Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8, Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR, MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8, Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);

    setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::SELECT);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
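  // Note: the maxStoresPerMem* limits above cap how many scalar stores the
  // SelectionDAG will emit when expanding a small memset/memcpy/memmove
  // inline before falling back to a call to the library routine.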
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");

  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  CCState CCInfo(CC, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);

  // If this is the first return lowered for this function, add the regs to the
  // liveout set for the function.
  if (DAG.getMachineFunction().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
  }

  SDOperand Chain = Op.getOperand(0);
  SDOperand Flag;

  // Copy the result values into the output registers.
  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
      RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
                               Flag);
      Flag = Chain.getValue(1);
    }
  } else {
    // We need to handle a destination of ST0 specially, because it isn't really
    // a register.
    SDOperand Value = Op.getOperand(1);

    // If this is an FP return with ScalarSSE, we need to move the value from
    // an XMM register onto the fp-stack.
    if (X86ScalarSSE) {
      SDOperand MemLoc;

      // If this is a load into a scalarsse value, don't store the loaded value
      // back to the stack, only to reload it: just replace the scalar-sse load.
      if (ISD::isNON_EXTLoad(Value.Val) &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        Chain = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into top of stack.
        unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
      }
      SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
      SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
      Chain = Value.getValue(1);
    }

    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    SDOperand Ops[] = { Chain, Value };
    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
    Flag = Chain.getValue(1);
  }

  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
  if (Flag.Val)
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
  else
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}


/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered.
/// This returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallingConv, getTargetMachine(), RVLocs);
  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

  SmallVector<SDOperand, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
    for (unsigned i = 0; i != RVLocs.size(); ++i) {
      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
                                 RVLocs[i].getValVT(), InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      ResultVals.push_back(Chain.getValue(0));
    }
  } else {
    // Copies from the FP stack are special, as ST0 isn't a valid register
    // before the fp stackifier runs.

    // Copy ST0 into an RFP register with FP_GET_RESULT.
    SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
    SDOperand GROps[] = { Chain, InFlag };
    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
    Chain = RetVal.getValue(1);
    InFlag = RetVal.getValue(2);

    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
    // an XMM register.
    if (X86ScalarSSE) {
      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      SDOperand Ops[] = {
        Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
      };
      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
      RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
      Chain = RetVal.getValue(1);
    }

    if (RVLocs[0].getValVT() == MVT::f32 && !X86ScalarSSE)
      // FIXME: we would really like to remember that this FP_ROUND
      // operation is okay to eliminate if we allow excess FP precision.
      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
    ResultVals.push_back(RetVal);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).Val;
}


//===----------------------------------------------------------------------===//
//                C & StdCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//  The StdCall calling convention is the standard for many Windows API
//  routines and similar interfaces. It differs from the C calling convention
//  only a little: the callee, not the caller, should clean up the stack.
//  Symbols should also be decorated in some fancy way :) It doesn't support
//  any vector arguments.

/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live-in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          const TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}

SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
                                               bool isStdCall) {
  unsigned NumArgs = Op.Val->getNumValues() - 1;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
                 ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      // Create the nodes corresponding to a load from this parameter slot.
      int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                      VA.getLocMemOffset());
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();

  ArgValues.push_back(Root);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg)
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);

  if (isStdCall && !isVarArg) {
    BytesToPopOnReturn = StackSize; // Callee pops everything.
    BytesCallerReserves = 0;
  } else {
    BytesToPopOnReturn = 0; // Callee pops nothing.

    // If this is an sret function, the return should pop the hidden pointer.
    if (NumArgs &&
        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
         ISD::ParamFlags::StructReturn))
      BytesToPopOnReturn = 4;

    BytesCallerReserves = StackSize;
  }

  RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
  ReturnAddrIndex = 0; // No return address slot generated yet.

  MF.getInfo<X86MachineFunctionInfo>()
    ->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                            unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
    }
  }

  // If the first argument is an sret pointer, remember it.
  bool isSRet = NumOps &&
    (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
     ISD::ParamFlags::StructReturn);

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPush = 0;

  if (CC == CallingConv::X86_StdCall) {
    if (isVarArg)
      NumBytesForCalleeToPush = isSRet ? 4 : 0;
    else
      NumBytesForCalleeToPush = NumBytes;
  } else {
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    NumBytesForCalleeToPush = isSRet ? 4 : 0;
  }

  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                   FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// and requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
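// For example, a fastcall function taking three 32-bit integer arguments
// typically receives the first two in ECX and EDX and the third on the stack,
// and pops those 4 stack bytes itself on return.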
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
SDOperand
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
                 ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      // Create the nodes corresponding to a load from this parameter slot.
      int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                      VA.getLocMemOffset());
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
    }
  }

  ArgValues.push_back(Root);

  unsigned StackSize = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the stack argument area takes 8n+4 bytes so that the start of
    // the arguments stays aligned after the return address has been pushed.
    if ((StackSize & 7) == 0)
      StackSize += 4;
  }

  VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
  RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
  ReturnAddrIndex = 0; // No return address slot generated yet.
  BytesToPopOnReturn = StackSize; // Callee pops all stack arguments.
  BytesCallerReserves = 0;

  MF.getInfo<X86MachineFunctionInfo>()
    ->setBytesToPopOnReturn(BytesToPopOnReturn);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                               unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
    // Make sure the stack argument area takes 8n+4 bytes so that the start of
    // the arguments stays aligned after the return address has been pushed.
    if ((NumBytes & 7) == 0)
      NumBytes += 4;
  }

  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                        getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // ELF / PIC requires GOT in the EBX register before function calls via PLT
  // GOT pointer.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    Chain = DAG.getCopyToReg(Chain, X86::EBX,
                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add an implicit use of the GOT pointer in EBX.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}


//===----------------------------------------------------------------------===//
//                 X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//

SDOperand
X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  static const unsigned GPR64ArgRegs[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  static const unsigned XMMArgRegs[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
                 ArgLocs);
  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);

  SmallVector<SDOperand, 8> ArgValues;
  unsigned LastVal = ~0U;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      MVT::ValueType RegVT = VA.getLocVT();
      TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = X86::GR32RegisterClass;
      else if (RegVT == MVT::i64)
        RC = X86::GR64RegisterClass;
      else if (RegVT == MVT::f32)
        RC = X86::FR32RegisterClass;
      else if (RegVT == MVT::f64)
        RC = X86::FR64RegisterClass;
      else {
        assert(MVT::isVector(RegVT));
        RC = X86::VR128RegisterClass;
      }

      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));

      if (VA.getLocInfo() != CCValAssign::Full)
        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);

      ArgValues.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      // Create the nodes corresponding to a load from this parameter slot.
      int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
                                      VA.getLocMemOffset());
      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    // For X86-64, if there are vararg parameters that are passed via
    // registers, then we must store them to their spots on the stack so they
    // may be loaded by dereferencing the result of va_next.
    VarArgsGPOffset = NumIntRegs * 8;
    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);

    // Store the integer parameter registers.
    SmallVector<SDOperand, 8> MemOps;
    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                                DAG.getConstant(VarArgsGPOffset, getPointerTy()));
    for (; NumIntRegs != 6; ++NumIntRegs) {
      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
                                X86::GR64RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }

    // Now store the XMM (fp + vector) parameter registers.
    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
    for (; NumXMMRegs != 8; ++NumXMMRegs) {
      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                X86::VR128RegisterClass);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                        DAG.getConstant(16, getPointerTy()));
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  ReturnAddrIndex = 0; // No return address slot generated yet.
  BytesToPopOnReturn = 0; // Callee pops nothing.
  BytesCallerReserves = StackSize;

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}

SDOperand
X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
                                        unsigned CC) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, getTargetMachine(), ArgLocs);
  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));

  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;

  SDOperand StackPtr;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.Val == 0)
        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isVarArg) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const unsigned XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);

    Chain = DAG.getCopyToReg(Chain, X86::AL,
                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    if (getTargetMachine().getCodeModel() != CodeModel::Large
        && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
                                           getTargetMachine(), true))
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    if (getTargetMachine().getCodeModel() != CodeModel::Large)
      Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDOperand, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Returns a flag for retval copy to use.
  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
1409 return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); 1410} 1411 1412 1413//===----------------------------------------------------------------------===// 1414// Other Lowering Hooks 1415//===----------------------------------------------------------------------===// 1416 1417 1418SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { 1419 if (ReturnAddrIndex == 0) { 1420 // Set up a frame object for the return address. 1421 MachineFunction &MF = DAG.getMachineFunction(); 1422 if (Subtarget->is64Bit()) 1423 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8); 1424 else 1425 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4); 1426 } 1427 1428 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 1429} 1430 1431 1432 1433/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86 1434/// specific condition code. It returns a false if it cannot do a direct 1435/// translation. X86CC is the translated CondCode. LHS/RHS are modified as 1436/// needed. 1437static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 1438 unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, 1439 SelectionDAG &DAG) { 1440 X86CC = X86::COND_INVALID; 1441 if (!isFP) { 1442 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 1443 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 1444 // X > -1 -> X == 0, jump !sign. 1445 RHS = DAG.getConstant(0, RHS.getValueType()); 1446 X86CC = X86::COND_NS; 1447 return true; 1448 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 1449 // X < 0 -> X == 0, jump on sign. 1450 X86CC = X86::COND_S; 1451 return true; 1452 } 1453 } 1454 1455 switch (SetCCOpcode) { 1456 default: break; 1457 case ISD::SETEQ: X86CC = X86::COND_E; break; 1458 case ISD::SETGT: X86CC = X86::COND_G; break; 1459 case ISD::SETGE: X86CC = X86::COND_GE; break; 1460 case ISD::SETLT: X86CC = X86::COND_L; break; 1461 case ISD::SETLE: X86CC = X86::COND_LE; break; 1462 case ISD::SETNE: X86CC = X86::COND_NE; break; 1463 case ISD::SETULT: X86CC = X86::COND_B; break; 1464 case ISD::SETUGT: X86CC = X86::COND_A; break; 1465 case ISD::SETULE: X86CC = X86::COND_BE; break; 1466 case ISD::SETUGE: X86CC = X86::COND_AE; break; 1467 } 1468 } else { 1469 // On a floating point condition, the flags are set as follows: 1470 // ZF PF CF op 1471 // 0 | 0 | 0 | X > Y 1472 // 0 | 0 | 1 | X < Y 1473 // 1 | 0 | 0 | X == Y 1474 // 1 | 1 | 1 | unordered 1475 bool Flip = false; 1476 switch (SetCCOpcode) { 1477 default: break; 1478 case ISD::SETUEQ: 1479 case ISD::SETEQ: X86CC = X86::COND_E; break; 1480 case ISD::SETOLT: Flip = true; // Fallthrough 1481 case ISD::SETOGT: 1482 case ISD::SETGT: X86CC = X86::COND_A; break; 1483 case ISD::SETOLE: Flip = true; // Fallthrough 1484 case ISD::SETOGE: 1485 case ISD::SETGE: X86CC = X86::COND_AE; break; 1486 case ISD::SETUGT: Flip = true; // Fallthrough 1487 case ISD::SETULT: 1488 case ISD::SETLT: X86CC = X86::COND_B; break; 1489 case ISD::SETUGE: Flip = true; // Fallthrough 1490 case ISD::SETULE: 1491 case ISD::SETLE: X86CC = X86::COND_BE; break; 1492 case ISD::SETONE: 1493 case ISD::SETNE: X86CC = X86::COND_NE; break; 1494 case ISD::SETUO: X86CC = X86::COND_P; break; 1495 case ISD::SETO: X86CC = X86::COND_NP; break; 1496 } 1497 if (Flip) 1498 std::swap(LHS, RHS); 1499 } 1500 1501 return X86CC != X86::COND_INVALID; 1502} 1503 1504/// hasFPCMov - is there a floating point cmov for the specific X86 condition 1505/// code. 
1505/// code. Current x86 ISA includes the following FP cmov instructions: 1506/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 1507static bool hasFPCMov(unsigned X86CC) { 1508 switch (X86CC) { 1509 default: 1510 return false; 1511 case X86::COND_B: 1512 case X86::COND_BE: 1513 case X86::COND_E: 1514 case X86::COND_P: 1515 case X86::COND_A: 1516 case X86::COND_AE: 1517 case X86::COND_NE: 1518 case X86::COND_NP: 1519 return true; 1520 } 1521} 1522 1523/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return 1524/// true if Op is undef or if its value falls within the specified range [Low, Hi). 1525static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { 1526 if (Op.getOpcode() == ISD::UNDEF) 1527 return true; 1528 1529 unsigned Val = cast<ConstantSDNode>(Op)->getValue(); 1530 return (Val >= Low && Val < Hi); 1531} 1532 1533/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return 1534/// true if Op is undef or if its value is equal to the specified value. 1535static bool isUndefOrEqual(SDOperand Op, unsigned Val) { 1536 if (Op.getOpcode() == ISD::UNDEF) 1537 return true; 1538 return cast<ConstantSDNode>(Op)->getValue() == Val; 1539} 1540 1541/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 1542/// specifies a shuffle of elements that is suitable for input to PSHUFD. 1543bool X86::isPSHUFDMask(SDNode *N) { 1544 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1545 1546 if (N->getNumOperands() != 4) 1547 return false; 1548 1549 // Check that the mask doesn't reference the second vector. 1550 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1551 SDOperand Arg = N->getOperand(i); 1552 if (Arg.getOpcode() == ISD::UNDEF) continue; 1553 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1554 if (cast<ConstantSDNode>(Arg)->getValue() >= 4) 1555 return false; 1556 } 1557 1558 return true; 1559} 1560 1561/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 1562/// specifies a shuffle of elements that is suitable for input to PSHUFHW. 1563bool X86::isPSHUFHWMask(SDNode *N) { 1564 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1565 1566 if (N->getNumOperands() != 8) 1567 return false; 1568 1569 // Lower quadword copied in order. 1570 for (unsigned i = 0; i != 4; ++i) { 1571 SDOperand Arg = N->getOperand(i); 1572 if (Arg.getOpcode() == ISD::UNDEF) continue; 1573 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1574 if (cast<ConstantSDNode>(Arg)->getValue() != i) 1575 return false; 1576 } 1577 1578 // Upper quadword shuffled. 1579 for (unsigned i = 4; i != 8; ++i) { 1580 SDOperand Arg = N->getOperand(i); 1581 if (Arg.getOpcode() == ISD::UNDEF) continue; 1582 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1583 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1584 if (Val < 4 || Val > 7) 1585 return false; 1586 } 1587 1588 return true; 1589} 1590 1591/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 1592/// specifies a shuffle of elements that is suitable for input to PSHUFLW. 1593bool X86::isPSHUFLWMask(SDNode *N) { 1594 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1595 1596 if (N->getNumOperands() != 8) 1597 return false; 1598 1599 // Upper quadword copied in order. 1600 for (unsigned i = 4; i != 8; ++i) 1601 if (!isUndefOrEqual(N->getOperand(i), i)) 1602 return false; 1603 1604 // Lower quadword shuffled.
1605 for (unsigned i = 0; i != 4; ++i) 1606 if (!isUndefOrInRange(N->getOperand(i), 0, 4)) 1607 return false; 1608 1609 return true; 1610} 1611 1612/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 1613/// specifies a shuffle of elements that is suitable for input to SHUFP*. 1614static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) { 1615 if (NumElems != 2 && NumElems != 4) return false; 1616 1617 unsigned Half = NumElems / 2; 1618 for (unsigned i = 0; i < Half; ++i) 1619 if (!isUndefOrInRange(Elems[i], 0, NumElems)) 1620 return false; 1621 for (unsigned i = Half; i < NumElems; ++i) 1622 if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2)) 1623 return false; 1624 1625 return true; 1626} 1627 1628bool X86::isSHUFPMask(SDNode *N) { 1629 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1630 return ::isSHUFPMask(N->op_begin(), N->getNumOperands()); 1631} 1632 1633/// isCommutedSHUFP - Returns true if the shuffle mask is except 1634/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 1635/// half elements to come from vector 1 (which would equal the dest.) and 1636/// the upper half to come from vector 2. 1637static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) { 1638 if (NumOps != 2 && NumOps != 4) return false; 1639 1640 unsigned Half = NumOps / 2; 1641 for (unsigned i = 0; i < Half; ++i) 1642 if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2)) 1643 return false; 1644 for (unsigned i = Half; i < NumOps; ++i) 1645 if (!isUndefOrInRange(Ops[i], 0, NumOps)) 1646 return false; 1647 return true; 1648} 1649 1650static bool isCommutedSHUFP(SDNode *N) { 1651 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1652 return isCommutedSHUFP(N->op_begin(), N->getNumOperands()); 1653} 1654 1655/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 1656/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 1657bool X86::isMOVHLPSMask(SDNode *N) { 1658 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1659 1660 if (N->getNumOperands() != 4) 1661 return false; 1662 1663 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 1664 return isUndefOrEqual(N->getOperand(0), 6) && 1665 isUndefOrEqual(N->getOperand(1), 7) && 1666 isUndefOrEqual(N->getOperand(2), 2) && 1667 isUndefOrEqual(N->getOperand(3), 3); 1668} 1669 1670/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 1671/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 1672/// <2, 3, 2, 3> 1673bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) { 1674 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1675 1676 if (N->getNumOperands() != 4) 1677 return false; 1678 1679 // Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3 1680 return isUndefOrEqual(N->getOperand(0), 2) && 1681 isUndefOrEqual(N->getOperand(1), 3) && 1682 isUndefOrEqual(N->getOperand(2), 2) && 1683 isUndefOrEqual(N->getOperand(3), 3); 1684} 1685 1686/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 1687/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 
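/// For example, with 4 elements the mask <4, 5, 2, 3> takes the low half from
/// V2 and keeps the high half of V1, matching MOVLPS, which overwrites only
/// the low 64 bits of its destination register.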
1688bool X86::isMOVLPMask(SDNode *N) { 1689 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1690 1691 unsigned NumElems = N->getNumOperands(); 1692 if (NumElems != 2 && NumElems != 4) 1693 return false; 1694 1695 for (unsigned i = 0; i < NumElems/2; ++i) 1696 if (!isUndefOrEqual(N->getOperand(i), i + NumElems)) 1697 return false; 1698 1699 for (unsigned i = NumElems/2; i < NumElems; ++i) 1700 if (!isUndefOrEqual(N->getOperand(i), i)) 1701 return false; 1702 1703 return true; 1704} 1705 1706/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 1707/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D} 1708/// and MOVLHPS. 1709bool X86::isMOVHPMask(SDNode *N) { 1710 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1711 1712 unsigned NumElems = N->getNumOperands(); 1713 if (NumElems != 2 && NumElems != 4) 1714 return false; 1715 1716 for (unsigned i = 0; i < NumElems/2; ++i) 1717 if (!isUndefOrEqual(N->getOperand(i), i)) 1718 return false; 1719 1720 for (unsigned i = 0; i < NumElems/2; ++i) { 1721 SDOperand Arg = N->getOperand(i + NumElems/2); 1722 if (!isUndefOrEqual(Arg, i + NumElems)) 1723 return false; 1724 } 1725 1726 return true; 1727} 1728 1729/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 1730/// specifies a shuffle of elements that is suitable for input to UNPCKL. 1731bool static isUNPCKLMask(const SDOperand *Elts, unsigned NumElts, 1732 bool V2IsSplat = false) { 1733 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 1734 return false; 1735 1736 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 1737 SDOperand BitI = Elts[i]; 1738 SDOperand BitI1 = Elts[i+1]; 1739 if (!isUndefOrEqual(BitI, j)) 1740 return false; 1741 if (V2IsSplat) { 1742 if (isUndefOrEqual(BitI1, NumElts)) 1743 return false; 1744 } else { 1745 if (!isUndefOrEqual(BitI1, j + NumElts)) 1746 return false; 1747 } 1748 } 1749 1750 return true; 1751} 1752 1753bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) { 1754 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1755 return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 1756} 1757 1758/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 1759/// specifies a shuffle of elements that is suitable for input to UNPCKH. 1760bool static isUNPCKHMask(const SDOperand *Elts, unsigned NumElts, 1761 bool V2IsSplat = false) { 1762 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 1763 return false; 1764 1765 for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { 1766 SDOperand BitI = Elts[i]; 1767 SDOperand BitI1 = Elts[i+1]; 1768 if (!isUndefOrEqual(BitI, j + NumElts/2)) 1769 return false; 1770 if (V2IsSplat) { 1771 if (isUndefOrEqual(BitI1, NumElts)) 1772 return false; 1773 } else { 1774 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 1775 return false; 1776 } 1777 } 1778 1779 return true; 1780} 1781 1782bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) { 1783 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1784 return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat); 1785} 1786 1787/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 1788/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. 
vector_shuffle v, undef, 1789/// <0, 0, 1, 1> 1790bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { 1791 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1792 1793 unsigned NumElems = N->getNumOperands(); 1794 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 1795 return false; 1796 1797 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { 1798 SDOperand BitI = N->getOperand(i); 1799 SDOperand BitI1 = N->getOperand(i+1); 1800 1801 if (!isUndefOrEqual(BitI, j)) 1802 return false; 1803 if (!isUndefOrEqual(BitI1, j)) 1804 return false; 1805 } 1806 1807 return true; 1808} 1809 1810/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 1811/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 1812/// <2, 2, 3, 3> 1813bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { 1814 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1815 1816 unsigned NumElems = N->getNumOperands(); 1817 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 1818 return false; 1819 1820 for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 1821 SDOperand BitI = N->getOperand(i); 1822 SDOperand BitI1 = N->getOperand(i + 1); 1823 1824 if (!isUndefOrEqual(BitI, j)) 1825 return false; 1826 if (!isUndefOrEqual(BitI1, j)) 1827 return false; 1828 } 1829 1830 return true; 1831} 1832 1833/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 1834/// specifies a shuffle of elements that is suitable for input to MOVSS, 1835/// MOVSD, and MOVD, i.e. setting the lowest element. 1836static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) { 1837 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 1838 return false; 1839 1840 if (!isUndefOrEqual(Elts[0], NumElts)) 1841 return false; 1842 1843 for (unsigned i = 1; i < NumElts; ++i) { 1844 if (!isUndefOrEqual(Elts[i], i)) 1845 return false; 1846 } 1847 1848 return true; 1849} 1850 1851bool X86::isMOVLMask(SDNode *N) { 1852 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1853 return ::isMOVLMask(N->op_begin(), N->getNumOperands()); 1854} 1855 1856/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 1857/// of what x86 movss want. X86 movs requires the lowest element to be lowest 1858/// element of vector 2 and the other elements to come from vector 1 in order. 1859static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps, 1860 bool V2IsSplat = false, 1861 bool V2IsUndef = false) { 1862 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 1863 return false; 1864 1865 if (!isUndefOrEqual(Ops[0], 0)) 1866 return false; 1867 1868 for (unsigned i = 1; i < NumOps; ++i) { 1869 SDOperand Arg = Ops[i]; 1870 if (!(isUndefOrEqual(Arg, i+NumOps) || 1871 (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || 1872 (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) 1873 return false; 1874 } 1875 1876 return true; 1877} 1878 1879static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false, 1880 bool V2IsUndef = false) { 1881 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1882 return isCommutedMOVL(N->op_begin(), N->getNumOperands(), 1883 V2IsSplat, V2IsUndef); 1884} 1885 1886/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 1887/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 
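/// MOVSHDUP (an SSE3 instruction) duplicates the odd-indexed single-precision
/// elements, so the only mask accepted here is an undef-tolerant form of
/// <1, 1, 3, 3>.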
1888bool X86::isMOVSHDUPMask(SDNode *N) { 1889 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1890 1891 if (N->getNumOperands() != 4) 1892 return false; 1893 1894 // Expect 1, 1, 3, 3 1895 for (unsigned i = 0; i < 2; ++i) { 1896 SDOperand Arg = N->getOperand(i); 1897 if (Arg.getOpcode() == ISD::UNDEF) continue; 1898 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1899 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1900 if (Val != 1) return false; 1901 } 1902 1903 bool HasHi = false; 1904 for (unsigned i = 2; i < 4; ++i) { 1905 SDOperand Arg = N->getOperand(i); 1906 if (Arg.getOpcode() == ISD::UNDEF) continue; 1907 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1908 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1909 if (Val != 3) return false; 1910 HasHi = true; 1911 } 1912 1913 // Don't use movshdup if it can be done with a shufps. 1914 return HasHi; 1915} 1916 1917/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 1918/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 1919bool X86::isMOVSLDUPMask(SDNode *N) { 1920 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1921 1922 if (N->getNumOperands() != 4) 1923 return false; 1924 1925 // Expect 0, 0, 2, 2 1926 for (unsigned i = 0; i < 2; ++i) { 1927 SDOperand Arg = N->getOperand(i); 1928 if (Arg.getOpcode() == ISD::UNDEF) continue; 1929 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1930 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1931 if (Val != 0) return false; 1932 } 1933 1934 bool HasHi = false; 1935 for (unsigned i = 2; i < 4; ++i) { 1936 SDOperand Arg = N->getOperand(i); 1937 if (Arg.getOpcode() == ISD::UNDEF) continue; 1938 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1939 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 1940 if (Val != 2) return false; 1941 HasHi = true; 1942 } 1943 1944 // Don't use movshdup if it can be done with a shufps. 1945 return HasHi; 1946} 1947 1948/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 1949/// a splat of a single element. 1950static bool isSplatMask(SDNode *N) { 1951 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1952 1953 // This is a splat operation if each element of the permute is the same, and 1954 // if the value doesn't reference the second vector. 1955 unsigned NumElems = N->getNumOperands(); 1956 SDOperand ElementBase; 1957 unsigned i = 0; 1958 for (; i != NumElems; ++i) { 1959 SDOperand Elt = N->getOperand(i); 1960 if (isa<ConstantSDNode>(Elt)) { 1961 ElementBase = Elt; 1962 break; 1963 } 1964 } 1965 1966 if (!ElementBase.Val) 1967 return false; 1968 1969 for (; i != NumElems; ++i) { 1970 SDOperand Arg = N->getOperand(i); 1971 if (Arg.getOpcode() == ISD::UNDEF) continue; 1972 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 1973 if (Arg != ElementBase) return false; 1974 } 1975 1976 // Make sure it is a splat of the first vector operand. 1977 return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems; 1978} 1979 1980/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies 1981/// a splat of a single element and it's a 2 or 4 element mask. 1982bool X86::isSplatMask(SDNode *N) { 1983 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1984 1985 // We can only splat 64-bit, and 32-bit quantities with a single instruction. 
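  // A 2- or 4-element splat can be matched directly as a single shuffle
  // (e.g. a pshufd or shufps with an all-equal mask); v8i16 and v16i8 splats
  // are instead widened to v4i32 first, see PromoteSplat further down.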
1986 if (N->getNumOperands() != 4 && N->getNumOperands() != 2) 1987 return false; 1988 return ::isSplatMask(N); 1989} 1990 1991/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand 1992/// specifies a splat of zero element. 1993bool X86::isSplatLoMask(SDNode *N) { 1994 assert(N->getOpcode() == ISD::BUILD_VECTOR); 1995 1996 for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i) 1997 if (!isUndefOrEqual(N->getOperand(i), 0)) 1998 return false; 1999 return true; 2000} 2001 2002/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 2003/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 2004/// instructions. 2005unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 2006 unsigned NumOperands = N->getNumOperands(); 2007 unsigned Shift = (NumOperands == 4) ? 2 : 1; 2008 unsigned Mask = 0; 2009 for (unsigned i = 0; i < NumOperands; ++i) { 2010 unsigned Val = 0; 2011 SDOperand Arg = N->getOperand(NumOperands-i-1); 2012 if (Arg.getOpcode() != ISD::UNDEF) 2013 Val = cast<ConstantSDNode>(Arg)->getValue(); 2014 if (Val >= NumOperands) Val -= NumOperands; 2015 Mask |= Val; 2016 if (i != NumOperands - 1) 2017 Mask <<= Shift; 2018 } 2019 2020 return Mask; 2021} 2022 2023/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 2024/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW 2025/// instructions. 2026unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 2027 unsigned Mask = 0; 2028 // 8 nodes, but we only care about the last 4. 2029 for (unsigned i = 7; i >= 4; --i) { 2030 unsigned Val = 0; 2031 SDOperand Arg = N->getOperand(i); 2032 if (Arg.getOpcode() != ISD::UNDEF) 2033 Val = cast<ConstantSDNode>(Arg)->getValue(); 2034 Mask |= (Val - 4); 2035 if (i != 4) 2036 Mask <<= 2; 2037 } 2038 2039 return Mask; 2040} 2041 2042/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 2043/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW 2044/// instructions. 2045unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 2046 unsigned Mask = 0; 2047 // 8 nodes, but we only care about the first 4. 2048 for (int i = 3; i >= 0; --i) { 2049 unsigned Val = 0; 2050 SDOperand Arg = N->getOperand(i); 2051 if (Arg.getOpcode() != ISD::UNDEF) 2052 Val = cast<ConstantSDNode>(Arg)->getValue(); 2053 Mask |= Val; 2054 if (i != 0) 2055 Mask <<= 2; 2056 } 2057 2058 return Mask; 2059} 2060 2061/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand 2062/// specifies a 8 element shuffle that can be broken into a pair of 2063/// PSHUFHW and PSHUFLW. 2064static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { 2065 assert(N->getOpcode() == ISD::BUILD_VECTOR); 2066 2067 if (N->getNumOperands() != 8) 2068 return false; 2069 2070 // Lower quadword shuffled. 2071 for (unsigned i = 0; i != 4; ++i) { 2072 SDOperand Arg = N->getOperand(i); 2073 if (Arg.getOpcode() == ISD::UNDEF) continue; 2074 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2075 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2076 if (Val > 4) 2077 return false; 2078 } 2079 2080 // Upper quadword shuffled. 
2081 for (unsigned i = 4; i != 8; ++i) { 2082 SDOperand Arg = N->getOperand(i); 2083 if (Arg.getOpcode() == ISD::UNDEF) continue; 2084 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2085 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2086 if (Val < 4 || Val > 7) 2087 return false; 2088 } 2089 2090 return true; 2091} 2092 2093/// CommuteVectorShuffle - Swap vector_shuffle operandsas well as 2094/// values in ther permute mask. 2095static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, 2096 SDOperand &V2, SDOperand &Mask, 2097 SelectionDAG &DAG) { 2098 MVT::ValueType VT = Op.getValueType(); 2099 MVT::ValueType MaskVT = Mask.getValueType(); 2100 MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT); 2101 unsigned NumElems = Mask.getNumOperands(); 2102 SmallVector<SDOperand, 8> MaskVec; 2103 2104 for (unsigned i = 0; i != NumElems; ++i) { 2105 SDOperand Arg = Mask.getOperand(i); 2106 if (Arg.getOpcode() == ISD::UNDEF) { 2107 MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); 2108 continue; 2109 } 2110 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); 2111 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2112 if (Val < NumElems) 2113 MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT)); 2114 else 2115 MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT)); 2116 } 2117 2118 std::swap(V1, V2); 2119 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2120 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2121} 2122 2123/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 2124/// match movhlps. The lower half elements should come from upper half of 2125/// V1 (and in order), and the upper half elements should come from the upper 2126/// half of V2 (and in order). 2127static bool ShouldXformToMOVHLPS(SDNode *Mask) { 2128 unsigned NumElems = Mask->getNumOperands(); 2129 if (NumElems != 4) 2130 return false; 2131 for (unsigned i = 0, e = 2; i != e; ++i) 2132 if (!isUndefOrEqual(Mask->getOperand(i), i+2)) 2133 return false; 2134 for (unsigned i = 2; i != 4; ++i) 2135 if (!isUndefOrEqual(Mask->getOperand(i), i+4)) 2136 return false; 2137 return true; 2138} 2139 2140/// isScalarLoadToVector - Returns true if the node is a scalar load that 2141/// is promoted to a vector. 2142static inline bool isScalarLoadToVector(SDNode *N) { 2143 if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) { 2144 N = N->getOperand(0).Val; 2145 return ISD::isNON_EXTLoad(N); 2146 } 2147 return false; 2148} 2149 2150/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 2151/// match movlp{s|d}. The lower half elements should come from lower half of 2152/// V1 (and in order), and the upper half elements should come from the upper 2153/// half of V2 (and in order). And since V1 will become the source of the 2154/// MOVLP, it must be either a vector load or a scalar load to vector. 2155static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) { 2156 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 2157 return false; 2158 // Is V2 is a vector load, don't do this transformation. We will try to use 2159 // load folding shufps op. 
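  // Folding the load of V2 directly into the memory operand of a SHUFPS is
  // usually better than commuting the operands to form a MOVLPS, so that case
  // is left to the generic shuffle matching.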
2160 if (ISD::isNON_EXTLoad(V2)) 2161 return false; 2162 2163 unsigned NumElems = Mask->getNumOperands(); 2164 if (NumElems != 2 && NumElems != 4) 2165 return false; 2166 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 2167 if (!isUndefOrEqual(Mask->getOperand(i), i)) 2168 return false; 2169 for (unsigned i = NumElems/2; i != NumElems; ++i) 2170 if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems)) 2171 return false; 2172 return true; 2173} 2174 2175/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 2176/// all the same. 2177static bool isSplatVector(SDNode *N) { 2178 if (N->getOpcode() != ISD::BUILD_VECTOR) 2179 return false; 2180 2181 SDOperand SplatValue = N->getOperand(0); 2182 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 2183 if (N->getOperand(i) != SplatValue) 2184 return false; 2185 return true; 2186} 2187 2188/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 2189/// to an undef. 2190static bool isUndefShuffle(SDNode *N) { 2191 if (N->getOpcode() != ISD::BUILD_VECTOR) 2192 return false; 2193 2194 SDOperand V1 = N->getOperand(0); 2195 SDOperand V2 = N->getOperand(1); 2196 SDOperand Mask = N->getOperand(2); 2197 unsigned NumElems = Mask.getNumOperands(); 2198 for (unsigned i = 0; i != NumElems; ++i) { 2199 SDOperand Arg = Mask.getOperand(i); 2200 if (Arg.getOpcode() != ISD::UNDEF) { 2201 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2202 if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) 2203 return false; 2204 else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF) 2205 return false; 2206 } 2207 } 2208 return true; 2209} 2210 2211/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 2212/// that point to V2 points to its first element. 2213static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { 2214 assert(Mask.getOpcode() == ISD::BUILD_VECTOR); 2215 2216 bool Changed = false; 2217 SmallVector<SDOperand, 8> MaskVec; 2218 unsigned NumElems = Mask.getNumOperands(); 2219 for (unsigned i = 0; i != NumElems; ++i) { 2220 SDOperand Arg = Mask.getOperand(i); 2221 if (Arg.getOpcode() != ISD::UNDEF) { 2222 unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); 2223 if (Val > NumElems) { 2224 Arg = DAG.getConstant(NumElems, Arg.getValueType()); 2225 Changed = true; 2226 } 2227 } 2228 MaskVec.push_back(Arg); 2229 } 2230 2231 if (Changed) 2232 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), 2233 &MaskVec[0], MaskVec.size()); 2234 return Mask; 2235} 2236 2237/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 2238/// operation of specified width. 2239static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { 2240 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2241 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 2242 2243 SmallVector<SDOperand, 8> MaskVec; 2244 MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); 2245 for (unsigned i = 1; i != NumElems; ++i) 2246 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2247 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2248} 2249 2250/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation 2251/// of specified width. 
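/// For 4 elements the result is the interleaving mask <0, 4, 1, 5>, i.e. the
/// element order produced by unpcklps/punpckldq on its two inputs.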
2252static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { 2253 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2254 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 2255 SmallVector<SDOperand, 8> MaskVec; 2256 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 2257 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2258 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); 2259 } 2260 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2261} 2262 2263/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation 2264/// of specified width. 2265static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { 2266 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2267 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 2268 unsigned Half = NumElems/2; 2269 SmallVector<SDOperand, 8> MaskVec; 2270 for (unsigned i = 0; i != Half; ++i) { 2271 MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); 2272 MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); 2273 } 2274 return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); 2275} 2276 2277/// getZeroVector - Returns a vector of specified type with all zero elements. 2278/// 2279static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) { 2280 assert(MVT::isVector(VT) && "Expected a vector type"); 2281 unsigned NumElems = getVectorNumElements(VT); 2282 MVT::ValueType EVT = MVT::getVectorBaseType(VT); 2283 bool isFP = MVT::isFloatingPoint(EVT); 2284 SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT); 2285 SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero); 2286 return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size()); 2287} 2288 2289/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32. 2290/// 2291static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) { 2292 SDOperand V1 = Op.getOperand(0); 2293 SDOperand Mask = Op.getOperand(2); 2294 MVT::ValueType VT = Op.getValueType(); 2295 unsigned NumElems = Mask.getNumOperands(); 2296 Mask = getUnpacklMask(NumElems, DAG); 2297 while (NumElems != 4) { 2298 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask); 2299 NumElems >>= 1; 2300 } 2301 V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1); 2302 2303 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2304 Mask = getZeroVector(MaskVT, DAG); 2305 SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1, 2306 DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask); 2307 return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); 2308} 2309 2310/// isZeroNode - Returns true if Elt is a constant zero or a floating point 2311/// constant +0.0. 2312static inline bool isZeroNode(SDOperand Elt) { 2313 return ((isa<ConstantSDNode>(Elt) && 2314 cast<ConstantSDNode>(Elt)->getValue() == 0) || 2315 (isa<ConstantFPSDNode>(Elt) && 2316 cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0))); 2317} 2318 2319/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 2320/// vector and zero or undef vector. 2321static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT, 2322 unsigned NumElems, unsigned Idx, 2323 bool isZero, SelectionDAG &DAG) { 2324 SDOperand V1 = isZero ? 
getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT); 2325 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2326 MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT); 2327 SDOperand Zero = DAG.getConstant(0, EVT); 2328 SmallVector<SDOperand, 8> MaskVec(NumElems, Zero); 2329 MaskVec[Idx] = DAG.getConstant(NumElems, EVT); 2330 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2331 &MaskVec[0], MaskVec.size()); 2332 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2333} 2334 2335/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 2336/// 2337static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, 2338 unsigned NumNonZero, unsigned NumZero, 2339 SelectionDAG &DAG, TargetLowering &TLI) { 2340 if (NumNonZero > 8) 2341 return SDOperand(); 2342 2343 SDOperand V(0, 0); 2344 bool First = true; 2345 for (unsigned i = 0; i < 16; ++i) { 2346 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 2347 if (ThisIsNonZero && First) { 2348 if (NumZero) 2349 V = getZeroVector(MVT::v8i16, DAG); 2350 else 2351 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2352 First = false; 2353 } 2354 2355 if ((i & 1) != 0) { 2356 SDOperand ThisElt(0, 0), LastElt(0, 0); 2357 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 2358 if (LastIsNonZero) { 2359 LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); 2360 } 2361 if (ThisIsNonZero) { 2362 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i)); 2363 ThisElt = DAG.getNode(ISD::SHL, MVT::i16, 2364 ThisElt, DAG.getConstant(8, MVT::i8)); 2365 if (LastIsNonZero) 2366 ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt); 2367 } else 2368 ThisElt = LastElt; 2369 2370 if (ThisElt.Val) 2371 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt, 2372 DAG.getConstant(i/2, TLI.getPointerTy())); 2373 } 2374 } 2375 2376 return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V); 2377} 2378 2379/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 2380/// 2381static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, 2382 unsigned NumNonZero, unsigned NumZero, 2383 SelectionDAG &DAG, TargetLowering &TLI) { 2384 if (NumNonZero > 4) 2385 return SDOperand(); 2386 2387 SDOperand V(0, 0); 2388 bool First = true; 2389 for (unsigned i = 0; i < 8; ++i) { 2390 bool isNonZero = (NonZeros & (1 << i)) != 0; 2391 if (isNonZero) { 2392 if (First) { 2393 if (NumZero) 2394 V = getZeroVector(MVT::v8i16, DAG); 2395 else 2396 V = DAG.getNode(ISD::UNDEF, MVT::v8i16); 2397 First = false; 2398 } 2399 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i), 2400 DAG.getConstant(i, TLI.getPointerTy())); 2401 } 2402 } 2403 2404 return V; 2405} 2406 2407SDOperand 2408X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2409 // All zero's are handled with pxor. 2410 if (ISD::isBuildVectorAllZeros(Op.Val)) 2411 return Op; 2412 2413 // All one's are handled with pcmpeqd. 
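  // pcmpeqd of a register with itself sets every bit, so, like the pxor used
  // for the all-zeros case above, an all-ones vector needs no constant pool
  // load.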
2414 if (ISD::isBuildVectorAllOnes(Op.Val)) 2415 return Op; 2416 2417 MVT::ValueType VT = Op.getValueType(); 2418 MVT::ValueType EVT = MVT::getVectorBaseType(VT); 2419 unsigned EVTBits = MVT::getSizeInBits(EVT); 2420 2421 unsigned NumElems = Op.getNumOperands(); 2422 unsigned NumZero = 0; 2423 unsigned NumNonZero = 0; 2424 unsigned NonZeros = 0; 2425 std::set<SDOperand> Values; 2426 for (unsigned i = 0; i < NumElems; ++i) { 2427 SDOperand Elt = Op.getOperand(i); 2428 if (Elt.getOpcode() != ISD::UNDEF) { 2429 Values.insert(Elt); 2430 if (isZeroNode(Elt)) 2431 NumZero++; 2432 else { 2433 NonZeros |= (1 << i); 2434 NumNonZero++; 2435 } 2436 } 2437 } 2438 2439 if (NumNonZero == 0) 2440 // Must be a mix of zero and undef. Return a zero vector. 2441 return getZeroVector(VT, DAG); 2442 2443 // Splat is obviously ok. Let legalizer expand it to a shuffle. 2444 if (Values.size() == 1) 2445 return SDOperand(); 2446 2447 // Special case for single non-zero element. 2448 if (NumNonZero == 1) { 2449 unsigned Idx = CountTrailingZeros_32(NonZeros); 2450 SDOperand Item = Op.getOperand(Idx); 2451 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item); 2452 if (Idx == 0) 2453 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 2454 return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx, 2455 NumZero > 0, DAG); 2456 2457 if (EVTBits == 32) { 2458 // Turn it into a shuffle of zero and zero-extended scalar to vector. 2459 Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0, 2460 DAG); 2461 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2462 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT); 2463 SmallVector<SDOperand, 8> MaskVec; 2464 for (unsigned i = 0; i < NumElems; i++) 2465 MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); 2466 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2467 &MaskVec[0], MaskVec.size()); 2468 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, 2469 DAG.getNode(ISD::UNDEF, VT), Mask); 2470 } 2471 } 2472 2473 // Let legalizer expand 2-wide build_vectors. 2474 if (EVTBits == 64) 2475 return SDOperand(); 2476 2477 // If element VT is < 32 bits, convert it to inserts into a zero vector. 2478 if (EVTBits == 8 && NumElems == 16) { 2479 SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 2480 *this); 2481 if (V.Val) return V; 2482 } 2483 2484 if (EVTBits == 16 && NumElems == 8) { 2485 SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 2486 *this); 2487 if (V.Val) return V; 2488 } 2489 2490 // If element VT is == 32 bits, turn it into a number of shuffles. 2491 SmallVector<SDOperand, 8> V; 2492 V.resize(NumElems); 2493 if (NumElems == 4 && NumZero > 0) { 2494 for (unsigned i = 0; i < 4; ++i) { 2495 bool isZero = !(NonZeros & (1 << i)); 2496 if (isZero) 2497 V[i] = getZeroVector(VT, DAG); 2498 else 2499 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 2500 } 2501 2502 for (unsigned i = 0; i < 2; ++i) { 2503 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 2504 default: break; 2505 case 0: 2506 V[i] = V[i*2]; // Must be a zero vector. 
2507 break; 2508 case 1: 2509 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2], 2510 getMOVLMask(NumElems, DAG)); 2511 break; 2512 case 2: 2513 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 2514 getMOVLMask(NumElems, DAG)); 2515 break; 2516 case 3: 2517 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1], 2518 getUnpacklMask(NumElems, DAG)); 2519 break; 2520 } 2521 } 2522 2523 // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd) 2524 // clears the upper bits. 2525 // FIXME: we can do the same for v4f32 case when we know both parts of 2526 // the lower half come from scalar_to_vector (loadf32). We should do 2527 // that in post legalizer dag combiner with target specific hooks. 2528 if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0) 2529 return V[0]; 2530 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2531 MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT); 2532 SmallVector<SDOperand, 8> MaskVec; 2533 bool Reverse = (NonZeros & 0x3) == 2; 2534 for (unsigned i = 0; i < 2; ++i) 2535 if (Reverse) 2536 MaskVec.push_back(DAG.getConstant(1-i, EVT)); 2537 else 2538 MaskVec.push_back(DAG.getConstant(i, EVT)); 2539 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 2540 for (unsigned i = 0; i < 2; ++i) 2541 if (Reverse) 2542 MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); 2543 else 2544 MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); 2545 SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2546 &MaskVec[0], MaskVec.size()); 2547 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); 2548 } 2549 2550 if (Values.size() > 2) { 2551 // Expand into a number of unpckl*. 2552 // e.g. for v4f32 2553 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 2554 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 2555 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 2556 SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); 2557 for (unsigned i = 0; i < NumElems; ++i) 2558 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); 2559 NumElems >>= 1; 2560 while (NumElems != 0) { 2561 for (unsigned i = 0; i < NumElems; ++i) 2562 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems], 2563 UnpckMask); 2564 NumElems >>= 1; 2565 } 2566 return V[0]; 2567 } 2568 2569 return SDOperand(); 2570} 2571 2572SDOperand 2573X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 2574 SDOperand V1 = Op.getOperand(0); 2575 SDOperand V2 = Op.getOperand(1); 2576 SDOperand PermMask = Op.getOperand(2); 2577 MVT::ValueType VT = Op.getValueType(); 2578 unsigned NumElems = PermMask.getNumOperands(); 2579 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 2580 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 2581 bool V1IsSplat = false; 2582 bool V2IsSplat = false; 2583 2584 if (isUndefShuffle(Op.Val)) 2585 return DAG.getNode(ISD::UNDEF, VT); 2586 2587 if (isSplatMask(PermMask.Val)) { 2588 if (NumElems <= 4) return Op; 2589 // Promote it to a v4i32 splat. 2590 return PromoteSplat(Op, DAG); 2591 } 2592 2593 if (X86::isMOVLMask(PermMask.Val)) 2594 return (V1IsUndef) ? 
V2 : Op; 2595 2596 if (X86::isMOVSHDUPMask(PermMask.Val) || 2597 X86::isMOVSLDUPMask(PermMask.Val) || 2598 X86::isMOVHLPSMask(PermMask.Val) || 2599 X86::isMOVHPMask(PermMask.Val) || 2600 X86::isMOVLPMask(PermMask.Val)) 2601 return Op; 2602 2603 if (ShouldXformToMOVHLPS(PermMask.Val) || 2604 ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val)) 2605 return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2606 2607 bool Commuted = false; 2608 V1IsSplat = isSplatVector(V1.Val); 2609 V2IsSplat = isSplatVector(V2.Val); 2610 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 2611 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2612 std::swap(V1IsSplat, V2IsSplat); 2613 std::swap(V1IsUndef, V2IsUndef); 2614 Commuted = true; 2615 } 2616 2617 if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) { 2618 if (V2IsUndef) return V1; 2619 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2620 if (V2IsSplat) { 2621 // V2 is a splat, so the mask may be malformed. That is, it may point 2622 // to any V2 element. The instruction selectior won't like this. Get 2623 // a corrected mask and commute to form a proper MOVS{S|D}. 2624 SDOperand NewMask = getMOVLMask(NumElems, DAG); 2625 if (NewMask.Val != PermMask.Val) 2626 Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2627 } 2628 return Op; 2629 } 2630 2631 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 2632 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 2633 X86::isUNPCKLMask(PermMask.Val) || 2634 X86::isUNPCKHMask(PermMask.Val)) 2635 return Op; 2636 2637 if (V2IsSplat) { 2638 // Normalize mask so all entries that point to V2 points to its first 2639 // element then try to match unpck{h|l} again. If match, return a 2640 // new vector_shuffle with the corrected mask. 2641 SDOperand NewMask = NormalizeMask(PermMask, DAG); 2642 if (NewMask.Val != PermMask.Val) { 2643 if (X86::isUNPCKLMask(PermMask.Val, true)) { 2644 SDOperand NewMask = getUnpacklMask(NumElems, DAG); 2645 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2646 } else if (X86::isUNPCKHMask(PermMask.Val, true)) { 2647 SDOperand NewMask = getUnpackhMask(NumElems, DAG); 2648 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); 2649 } 2650 } 2651 } 2652 2653 // Normalize the node to match x86 shuffle ops if needed 2654 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val)) 2655 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2656 2657 if (Commuted) { 2658 // Commute is back and try unpck* again. 2659 Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG); 2660 if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) || 2661 X86::isUNPCKH_v_undef_Mask(PermMask.Val) || 2662 X86::isUNPCKLMask(PermMask.Val) || 2663 X86::isUNPCKHMask(PermMask.Val)) 2664 return Op; 2665 } 2666 2667 // If VT is integer, try PSHUF* first, then SHUFP*. 2668 if (MVT::isInteger(VT)) { 2669 if (X86::isPSHUFDMask(PermMask.Val) || 2670 X86::isPSHUFHWMask(PermMask.Val) || 2671 X86::isPSHUFLWMask(PermMask.Val)) { 2672 if (V2.getOpcode() != ISD::UNDEF) 2673 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 2674 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 2675 return Op; 2676 } 2677 2678 if (X86::isSHUFPMask(PermMask.Val) && 2679 MVT::getSizeInBits(VT) != 64) // Don't do this for MMX. 2680 return Op; 2681 2682 // Handle v8i16 shuffle high / low shuffle node pair. 
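  // If every element stays inside its own quadword, the mask can be split
  // into a shuffle that only rearranges the low four elements (a PSHUFLW)
  // followed by one that only rearranges the high four (a PSHUFHW).  For
  // example, <1, 0, 3, 2, 5, 4, 7, 6> becomes, roughly,
  //   pshuflw $0xB1  followed by  pshufhw $0xB1.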
2683 if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) { 2684 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems); 2685 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 2686 SmallVector<SDOperand, 8> MaskVec; 2687 for (unsigned i = 0; i != 4; ++i) 2688 MaskVec.push_back(PermMask.getOperand(i)); 2689 for (unsigned i = 4; i != 8; ++i) 2690 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2691 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2692 &MaskVec[0], MaskVec.size()); 2693 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2694 MaskVec.clear(); 2695 for (unsigned i = 0; i != 4; ++i) 2696 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2697 for (unsigned i = 4; i != 8; ++i) 2698 MaskVec.push_back(PermMask.getOperand(i)); 2699 Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size()); 2700 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); 2701 } 2702 } else { 2703 // Floating point cases in the other order. 2704 if (X86::isSHUFPMask(PermMask.Val)) 2705 return Op; 2706 if (X86::isPSHUFDMask(PermMask.Val) || 2707 X86::isPSHUFHWMask(PermMask.Val) || 2708 X86::isPSHUFLWMask(PermMask.Val)) { 2709 if (V2.getOpcode() != ISD::UNDEF) 2710 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, 2711 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask); 2712 return Op; 2713 } 2714 } 2715 2716 if (NumElems == 4 && 2717 // Don't do this for MMX. 2718 MVT::getSizeInBits(VT) != 64) { 2719 MVT::ValueType MaskVT = PermMask.getValueType(); 2720 MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT); 2721 SmallVector<std::pair<int, int>, 8> Locs; 2722 Locs.reserve(NumElems); 2723 SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 2724 SmallVector<SDOperand, 8> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 2725 unsigned NumHi = 0; 2726 unsigned NumLo = 0; 2727 // If no more than two elements come from either vector. This can be 2728 // implemented with two shuffles. First shuffle gather the elements. 2729 // The second shuffle, which takes the first shuffle as both of its 2730 // vector operands, put the elements into the right order. 2731 for (unsigned i = 0; i != NumElems; ++i) { 2732 SDOperand Elt = PermMask.getOperand(i); 2733 if (Elt.getOpcode() == ISD::UNDEF) { 2734 Locs[i] = std::make_pair(-1, -1); 2735 } else { 2736 unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); 2737 if (Val < NumElems) { 2738 Locs[i] = std::make_pair(0, NumLo); 2739 Mask1[NumLo] = Elt; 2740 NumLo++; 2741 } else { 2742 Locs[i] = std::make_pair(1, NumHi); 2743 if (2+NumHi < NumElems) 2744 Mask1[2+NumHi] = Elt; 2745 NumHi++; 2746 } 2747 } 2748 } 2749 if (NumLo <= 2 && NumHi <= 2) { 2750 V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 2751 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2752 &Mask1[0], Mask1.size())); 2753 for (unsigned i = 0; i != NumElems; ++i) { 2754 if (Locs[i].first == -1) 2755 continue; 2756 else { 2757 unsigned Idx = (i < NumElems/2) ? 0 : NumElems; 2758 Idx += Locs[i].first * (NumElems/2) + Locs[i].second; 2759 Mask2[i] = DAG.getConstant(Idx, MaskEVT); 2760 } 2761 } 2762 2763 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, 2764 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2765 &Mask2[0], Mask2.size())); 2766 } 2767 2768 // Break it into (shuffle shuffle_hi, shuffle_lo). 
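  // Otherwise fall back to three shuffles: one gathering the elements
  // destined for the low half of the result, one gathering those destined for
  // the high half, and a final shuffle of those two intermediate vectors that
  // puts everything into its final position.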
2769 Locs.clear(); 2770 SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 2771 SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT)); 2772 SmallVector<SDOperand,8> *MaskPtr = &LoMask; 2773 unsigned MaskIdx = 0; 2774 unsigned LoIdx = 0; 2775 unsigned HiIdx = NumElems/2; 2776 for (unsigned i = 0; i != NumElems; ++i) { 2777 if (i == NumElems/2) { 2778 MaskPtr = &HiMask; 2779 MaskIdx = 1; 2780 LoIdx = 0; 2781 HiIdx = NumElems/2; 2782 } 2783 SDOperand Elt = PermMask.getOperand(i); 2784 if (Elt.getOpcode() == ISD::UNDEF) { 2785 Locs[i] = std::make_pair(-1, -1); 2786 } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) { 2787 Locs[i] = std::make_pair(MaskIdx, LoIdx); 2788 (*MaskPtr)[LoIdx] = Elt; 2789 LoIdx++; 2790 } else { 2791 Locs[i] = std::make_pair(MaskIdx, HiIdx); 2792 (*MaskPtr)[HiIdx] = Elt; 2793 HiIdx++; 2794 } 2795 } 2796 2797 SDOperand LoShuffle = 2798 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 2799 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2800 &LoMask[0], LoMask.size())); 2801 SDOperand HiShuffle = 2802 DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, 2803 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2804 &HiMask[0], HiMask.size())); 2805 SmallVector<SDOperand, 8> MaskOps; 2806 for (unsigned i = 0; i != NumElems; ++i) { 2807 if (Locs[i].first == -1) { 2808 MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); 2809 } else { 2810 unsigned Idx = Locs[i].first * NumElems + Locs[i].second; 2811 MaskOps.push_back(DAG.getConstant(Idx, MaskEVT)); 2812 } 2813 } 2814 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle, 2815 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2816 &MaskOps[0], MaskOps.size())); 2817 } 2818 2819 return SDOperand(); 2820} 2821 2822SDOperand 2823X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 2824 if (!isa<ConstantSDNode>(Op.getOperand(1))) 2825 return SDOperand(); 2826 2827 MVT::ValueType VT = Op.getValueType(); 2828 // TODO: handle v16i8. 2829 if (MVT::getSizeInBits(VT) == 16) { 2830 // Transform it so it match pextrw which produces a 32-bit result. 2831 MVT::ValueType EVT = (MVT::ValueType)(VT+1); 2832 SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, 2833 Op.getOperand(0), Op.getOperand(1)); 2834 SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, 2835 DAG.getValueType(VT)); 2836 return DAG.getNode(ISD::TRUNCATE, VT, Assert); 2837 } else if (MVT::getSizeInBits(VT) == 32) { 2838 SDOperand Vec = Op.getOperand(0); 2839 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 2840 if (Idx == 0) 2841 return Op; 2842 // SHUFPS the element to the lowest double word, then movss. 
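  // e.g. to extract element 2 of a v4f32, a shufps with a <2, u, u, u> mask
  // moves the wanted value into lane 0, and the lane-0 extract that follows
  // is then just a movss (or a movd in the i32 case).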
2843 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2844 SmallVector<SDOperand, 8> IdxVec; 2845 IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT))); 2846 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 2847 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 2848 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 2849 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2850 &IdxVec[0], IdxVec.size()); 2851 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 2852 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 2853 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 2854 DAG.getConstant(0, getPointerTy())); 2855 } else if (MVT::getSizeInBits(VT) == 64) { 2856 SDOperand Vec = Op.getOperand(0); 2857 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 2858 if (Idx == 0) 2859 return Op; 2860 2861 // UNPCKHPD the element to the lowest double word, then movsd. 2862 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 2863 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 2864 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2865 SmallVector<SDOperand, 8> IdxVec; 2866 IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT))); 2867 IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT))); 2868 SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2869 &IdxVec[0], IdxVec.size()); 2870 Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), 2871 Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); 2872 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, 2873 DAG.getConstant(0, getPointerTy())); 2874 } 2875 2876 return SDOperand(); 2877} 2878 2879SDOperand 2880X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { 2881 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 2882 // as its second argument. 2883 MVT::ValueType VT = Op.getValueType(); 2884 MVT::ValueType BaseVT = MVT::getVectorBaseType(VT); 2885 SDOperand N0 = Op.getOperand(0); 2886 SDOperand N1 = Op.getOperand(1); 2887 SDOperand N2 = Op.getOperand(2); 2888 if (MVT::getSizeInBits(BaseVT) == 16) { 2889 if (N1.getValueType() != MVT::i32) 2890 N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1); 2891 if (N2.getValueType() != MVT::i32) 2892 N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32); 2893 return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); 2894 } else if (MVT::getSizeInBits(BaseVT) == 32) { 2895 unsigned Idx = cast<ConstantSDNode>(N2)->getValue(); 2896 if (Idx == 0) { 2897 // Use a movss. 2898 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1); 2899 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4); 2900 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT); 2901 SmallVector<SDOperand, 8> MaskVec; 2902 MaskVec.push_back(DAG.getConstant(4, BaseVT)); 2903 for (unsigned i = 1; i <= 3; ++i) 2904 MaskVec.push_back(DAG.getConstant(i, BaseVT)); 2905 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1, 2906 DAG.getNode(ISD::BUILD_VECTOR, MaskVT, 2907 &MaskVec[0], MaskVec.size())); 2908 } else { 2909 // Use two pinsrw instructions to insert a 32 bit value. 2910 Idx <<= 1; 2911 if (MVT::isFloatingPoint(N1.getValueType())) { 2912 if (ISD::isNON_EXTLoad(N1.Val)) { 2913 // Just load directly from f32mem to GR32. 
2914 LoadSDNode *LD = cast<LoadSDNode>(N1); 2915 N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(), 2916 LD->getSrcValue(), LD->getSrcValueOffset()); 2917 } else { 2918 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1); 2919 N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1); 2920 N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1, 2921 DAG.getConstant(0, getPointerTy())); 2922 } 2923 } 2924 N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0); 2925 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 2926 DAG.getConstant(Idx, getPointerTy())); 2927 N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8)); 2928 N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1, 2929 DAG.getConstant(Idx+1, getPointerTy())); 2930 return DAG.getNode(ISD::BIT_CONVERT, VT, N0); 2931 } 2932 } 2933 2934 return SDOperand(); 2935} 2936 2937SDOperand 2938X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2939 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); 2940 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt); 2941} 2942 2943// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2944// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 2945// one of the above mentioned nodes. It has to be wrapped because otherwise 2946// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2947// be used to form addressing mode. These wrapped nodes will be selected 2948// into MOV32ri. 2949SDOperand 2950X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 2951 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2952 SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), 2953 getPointerTy(), 2954 CP->getAlignment()); 2955 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 2956 // With PIC, the address is actually $g + Offset. 2957 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2958 !Subtarget->isPICStyleRIPRel()) { 2959 Result = DAG.getNode(ISD::ADD, getPointerTy(), 2960 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 2961 Result); 2962 } 2963 2964 return Result; 2965} 2966 2967SDOperand 2968X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 2969 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2970 SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); 2971 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 2972 // With PIC, the address is actually $g + Offset. 2973 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2974 !Subtarget->isPICStyleRIPRel()) { 2975 Result = DAG.getNode(ISD::ADD, getPointerTy(), 2976 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 2977 Result); 2978 } 2979 2980 // For Darwin & Mingw32, external and weak symbols are indirect, so we want to 2981 // load the value at address GV, not the value of GV itself. This means that 2982 // the GlobalAddress must be in the base or index register of the address, not 2983 // the GV offset field. 
Platform check is inside GVRequiresExtraLoad() call 2984 // The same applies for external symbols during PIC codegen 2985 if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false)) 2986 Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0); 2987 2988 return Result; 2989} 2990 2991// Lower ISD::GlobalTLSAddress using the "general dynamic" model 2992static SDOperand 2993LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 2994 const MVT::ValueType PtrVT) { 2995 SDOperand InFlag; 2996 SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, 2997 DAG.getNode(X86ISD::GlobalBaseReg, 2998 PtrVT), InFlag); 2999 InFlag = Chain.getValue(1); 3000 3001 // emit leal symbol@TLSGD(,%ebx,1), %eax 3002 SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); 3003 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3004 GA->getValueType(0), 3005 GA->getOffset()); 3006 SDOperand Ops[] = { Chain, TGA, InFlag }; 3007 SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); 3008 InFlag = Result.getValue(2); 3009 Chain = Result.getValue(1); 3010 3011 // call ___tls_get_addr. This function receives its argument in 3012 // the register EAX. 3013 Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag); 3014 InFlag = Chain.getValue(1); 3015 3016 NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 3017 SDOperand Ops1[] = { Chain, 3018 DAG.getTargetExternalSymbol("___tls_get_addr", 3019 PtrVT), 3020 DAG.getRegister(X86::EAX, PtrVT), 3021 DAG.getRegister(X86::EBX, PtrVT), 3022 InFlag }; 3023 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5); 3024 InFlag = Chain.getValue(1); 3025 3026 return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag); 3027} 3028 3029// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 3030// "local exec" model. 3031static SDOperand 3032LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 3033 const MVT::ValueType PtrVT) { 3034 // Get the Thread Pointer 3035 SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); 3036 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 3037 // exec) 3038 SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), 3039 GA->getValueType(0), 3040 GA->getOffset()); 3041 SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); 3042 3043 if (GA->getGlobal()->isDeclaration()) // initial exec TLS model 3044 Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0); 3045 3046 // The address of the thread local variable is the add of the thread 3047 // pointer with the offset of the variable. 
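  // For example, under the local exec model this amounts to reading the
  // thread pointer (typically %gs:0 on 32-bit ELF) and adding x@ntpoff to
  // it; the initial exec case handled above inserts one extra load through
  // x@indntpoff to fetch the offset at run time.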
3048 return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); 3049} 3050 3051SDOperand 3052X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { 3053 // TODO: implement the "local dynamic" model 3054 // TODO: implement the "initial exec" model for PIC executables 3055 assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() && 3056 "TLS not implemented for non-ELF and 64-bit targets"); 3057 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3058 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 3059 // otherwise use the "Local Exec" TLS Model 3060 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 3061 return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy()); 3062 else 3063 return LowerToTLSExecModel(GA, DAG, getPointerTy()); 3064} 3065 3066SDOperand 3067X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { 3068 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 3069 SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 3070 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3071 // With PIC, the address is actually $g + Offset. 3072 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3073 !Subtarget->isPICStyleRIPRel()) { 3074 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3075 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3076 Result); 3077 } 3078 3079 return Result; 3080} 3081 3082SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 3083 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 3084 SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); 3085 Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); 3086 // With PIC, the address is actually $g + Offset. 3087 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 3088 !Subtarget->isPICStyleRIPRel()) { 3089 Result = DAG.getNode(ISD::ADD, getPointerTy(), 3090 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), 3091 Result); 3092 } 3093 3094 return Result; 3095} 3096 3097SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { 3098 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 3099 "Not an i64 shift!"); 3100 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 3101 SDOperand ShOpLo = Op.getOperand(0); 3102 SDOperand ShOpHi = Op.getOperand(1); 3103 SDOperand ShAmt = Op.getOperand(2); 3104 SDOperand Tmp1 = isSRA ? 3105 DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) : 3106 DAG.getConstant(0, MVT::i32); 3107 3108 SDOperand Tmp2, Tmp3; 3109 if (Op.getOpcode() == ISD::SHL_PARTS) { 3110 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt); 3111 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt); 3112 } else { 3113 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt); 3114 Tmp3 = DAG.getNode(isSRA ? 
ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt); 3115 } 3116 3117 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3118 SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, 3119 DAG.getConstant(32, MVT::i8)); 3120 SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)}; 3121 SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1); 3122 3123 SDOperand Hi, Lo; 3124 SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3125 3126 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); 3127 SmallVector<SDOperand, 4> Ops; 3128 if (Op.getOpcode() == ISD::SHL_PARTS) { 3129 Ops.push_back(Tmp2); 3130 Ops.push_back(Tmp3); 3131 Ops.push_back(CC); 3132 Ops.push_back(InFlag); 3133 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3134 InFlag = Hi.getValue(1); 3135 3136 Ops.clear(); 3137 Ops.push_back(Tmp3); 3138 Ops.push_back(Tmp1); 3139 Ops.push_back(CC); 3140 Ops.push_back(InFlag); 3141 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3142 } else { 3143 Ops.push_back(Tmp2); 3144 Ops.push_back(Tmp3); 3145 Ops.push_back(CC); 3146 Ops.push_back(InFlag); 3147 Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3148 InFlag = Lo.getValue(1); 3149 3150 Ops.clear(); 3151 Ops.push_back(Tmp3); 3152 Ops.push_back(Tmp1); 3153 Ops.push_back(CC); 3154 Ops.push_back(InFlag); 3155 Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3156 } 3157 3158 VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); 3159 Ops.clear(); 3160 Ops.push_back(Lo); 3161 Ops.push_back(Hi); 3162 return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size()); 3163} 3164 3165SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 3166 assert(Op.getOperand(0).getValueType() <= MVT::i64 && 3167 Op.getOperand(0).getValueType() >= MVT::i16 && 3168 "Unknown SINT_TO_FP to lower!"); 3169 3170 SDOperand Result; 3171 MVT::ValueType SrcVT = Op.getOperand(0).getValueType(); 3172 unsigned Size = MVT::getSizeInBits(SrcVT)/8; 3173 MachineFunction &MF = DAG.getMachineFunction(); 3174 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); 3175 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3176 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), 3177 StackSlot, NULL, 0); 3178 3179 // Build the FILD 3180 SDVTList Tys; 3181 if (X86ScalarSSE) 3182 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); 3183 else 3184 Tys = DAG.getVTList(MVT::f64, MVT::Other); 3185 SmallVector<SDOperand, 8> Ops; 3186 Ops.push_back(Chain); 3187 Ops.push_back(StackSlot); 3188 Ops.push_back(DAG.getValueType(SrcVT)); 3189 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD, 3190 Tys, &Ops[0], Ops.size()); 3191 3192 if (X86ScalarSSE) { 3193 Chain = Result.getValue(1); 3194 SDOperand InFlag = Result.getValue(2); 3195 3196 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 3197 // shouldn't be necessary except that RFP cannot be live across 3198 // multiple blocks. When stackifier is fixed, they can be uncoupled. 
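  // In other words, for the SSE case: fild the integer from its stack
  // slot, fst the x87 result into a second stack slot, then reload that
  // slot as the SSE value type requested by the node.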
3199 MachineFunction &MF = DAG.getMachineFunction(); 3200 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 3201 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3202 Tys = DAG.getVTList(MVT::Other); 3203 SmallVector<SDOperand, 8> Ops; 3204 Ops.push_back(Chain); 3205 Ops.push_back(Result); 3206 Ops.push_back(StackSlot); 3207 Ops.push_back(DAG.getValueType(Op.getValueType())); 3208 Ops.push_back(InFlag); 3209 Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size()); 3210 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0); 3211 } 3212 3213 return Result; 3214} 3215 3216SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 3217 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 && 3218 "Unknown FP_TO_SINT to lower!"); 3219 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 3220 // stack slot. 3221 MachineFunction &MF = DAG.getMachineFunction(); 3222 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8; 3223 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3224 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3225 3226 unsigned Opc; 3227 switch (Op.getValueType()) { 3228 default: assert(0 && "Invalid FP_TO_SINT to lower!"); 3229 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 3230 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 3231 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 3232 } 3233 3234 SDOperand Chain = DAG.getEntryNode(); 3235 SDOperand Value = Op.getOperand(0); 3236 if (X86ScalarSSE) { 3237 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 3238 Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0); 3239 SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other); 3240 SDOperand Ops[] = { 3241 Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) 3242 }; 3243 Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); 3244 Chain = Value.getValue(1); 3245 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); 3246 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 3247 } 3248 3249 // Build the FP_TO_INT*_IN_MEM 3250 SDOperand Ops[] = { Chain, Value, StackSlot }; 3251 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); 3252 3253 // Load the result. 
3254 return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); 3255} 3256 3257SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { 3258 MVT::ValueType VT = Op.getValueType(); 3259 const Type *OpNTy = MVT::getTypeForValueType(VT); 3260 std::vector<Constant*> CV; 3261 if (VT == MVT::f64) { 3262 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)))); 3263 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3264 } else { 3265 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)))); 3266 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3267 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3268 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3269 } 3270 Constant *CS = ConstantStruct::get(CV); 3271 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3272 SDVTList Tys = DAG.getVTList(VT, MVT::Other); 3273 SmallVector<SDOperand, 3> Ops; 3274 Ops.push_back(DAG.getEntryNode()); 3275 Ops.push_back(CPIdx); 3276 Ops.push_back(DAG.getSrcValue(NULL)); 3277 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3278 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); 3279} 3280 3281SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { 3282 MVT::ValueType VT = Op.getValueType(); 3283 const Type *OpNTy = MVT::getTypeForValueType(VT); 3284 std::vector<Constant*> CV; 3285 if (VT == MVT::f64) { 3286 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63))); 3287 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3288 } else { 3289 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31))); 3290 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3291 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3292 CV.push_back(ConstantFP::get(OpNTy, 0.0)); 3293 } 3294 Constant *CS = ConstantStruct::get(CV); 3295 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3296 SDVTList Tys = DAG.getVTList(VT, MVT::Other); 3297 SmallVector<SDOperand, 3> Ops; 3298 Ops.push_back(DAG.getEntryNode()); 3299 Ops.push_back(CPIdx); 3300 Ops.push_back(DAG.getSrcValue(NULL)); 3301 SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3302 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask); 3303} 3304 3305SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { 3306 SDOperand Op0 = Op.getOperand(0); 3307 SDOperand Op1 = Op.getOperand(1); 3308 MVT::ValueType VT = Op.getValueType(); 3309 MVT::ValueType SrcVT = Op1.getValueType(); 3310 const Type *SrcTy = MVT::getTypeForValueType(SrcVT); 3311 3312 // If second operand is smaller, extend it first. 3313 if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) { 3314 Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1); 3315 SrcVT = VT; 3316 } 3317 3318 // First get the sign bit of second operand. 
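  // This uses a constant-pool mask with only the sign bit set (1 << 63 for
  // f64, 1 << 31 for f32), which is ANDed with Op1 below.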
3319 std::vector<Constant*> CV; 3320 if (SrcVT == MVT::f64) { 3321 CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(1ULL << 63))); 3322 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3323 } else { 3324 CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(1U << 31))); 3325 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3326 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3327 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3328 } 3329 Constant *CS = ConstantStruct::get(CV); 3330 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3331 SDVTList Tys = DAG.getVTList(SrcVT, MVT::Other); 3332 SmallVector<SDOperand, 3> Ops; 3333 Ops.push_back(DAG.getEntryNode()); 3334 Ops.push_back(CPIdx); 3335 Ops.push_back(DAG.getSrcValue(NULL)); 3336 SDOperand Mask1 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3337 SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); 3338 3339 // Shift sign bit right or left if the two operands have different types. 3340 if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) { 3341 // Op0 is MVT::f32, Op1 is MVT::f64. 3342 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit); 3343 SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit, 3344 DAG.getConstant(32, MVT::i32)); 3345 SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit); 3346 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit, 3347 DAG.getConstant(0, getPointerTy())); 3348 } 3349 3350 // Clear first operand sign bit. 3351 CV.clear(); 3352 if (VT == MVT::f64) { 3353 CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(~(1ULL << 63)))); 3354 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3355 } else { 3356 CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(~(1U << 31)))); 3357 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3358 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3359 CV.push_back(ConstantFP::get(SrcTy, 0.0)); 3360 } 3361 CS = ConstantStruct::get(CV); 3362 CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4); 3363 Tys = DAG.getVTList(VT, MVT::Other); 3364 Ops.clear(); 3365 Ops.push_back(DAG.getEntryNode()); 3366 Ops.push_back(CPIdx); 3367 Ops.push_back(DAG.getSrcValue(NULL)); 3368 SDOperand Mask2 = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size()); 3369 SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); 3370 3371 // Or the value with the sign bit. 
3372 return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); 3373} 3374 3375SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG, 3376 SDOperand Chain) { 3377 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 3378 SDOperand Cond; 3379 SDOperand Op0 = Op.getOperand(0); 3380 SDOperand Op1 = Op.getOperand(1); 3381 SDOperand CC = Op.getOperand(2); 3382 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3383 const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3384 const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 3385 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); 3386 unsigned X86CC; 3387 3388 if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, 3389 Op0, Op1, DAG)) { 3390 SDOperand Ops1[] = { Chain, Op0, Op1 }; 3391 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1); 3392 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 3393 return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3394 } 3395 3396 assert(isFP && "Illegal integer SetCC!"); 3397 3398 SDOperand COps[] = { Chain, Op0, Op1 }; 3399 Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1); 3400 3401 switch (SetCCOpcode) { 3402 default: assert(false && "Illegal floating point SetCC!"); 3403 case ISD::SETOEQ: { // !PF & ZF 3404 SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond }; 3405 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 3406 SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8), 3407 Tmp1.getValue(1) }; 3408 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3409 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); 3410 } 3411 case ISD::SETUNE: { // PF | !ZF 3412 SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond }; 3413 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); 3414 SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8), 3415 Tmp1.getValue(1) }; 3416 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); 3417 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); 3418 } 3419 } 3420} 3421 3422SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { 3423 bool addTest = true; 3424 SDOperand Chain = DAG.getEntryNode(); 3425 SDOperand Cond = Op.getOperand(0); 3426 SDOperand CC; 3427 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3428 3429 if (Cond.getOpcode() == ISD::SETCC) 3430 Cond = LowerSETCC(Cond, DAG, Chain); 3431 3432 if (Cond.getOpcode() == X86ISD::SETCC) { 3433 CC = Cond.getOperand(0); 3434 3435 // If condition flag is set by a X86ISD::CMP, then make a copy of it 3436 // (since flag operand cannot be shared). Use it as the condition setting 3437 // operand in place of the X86ISD::SETCC. 3438 // If the X86ISD::SETCC has more than one use, then perhaps it's better 3439 // to use a test instead of duplicating the X86ISD::CMP (for register 3440 // pressure reason)? 
3441 SDOperand Cmp = Cond.getOperand(1); 3442 unsigned Opc = Cmp.getOpcode(); 3443 bool IllegalFPCMov = !X86ScalarSSE && 3444 MVT::isFloatingPoint(Op.getValueType()) && 3445 !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended()); 3446 if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) && 3447 !IllegalFPCMov) { 3448 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 3449 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 3450 addTest = false; 3451 } 3452 } 3453 3454 if (addTest) { 3455 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3456 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 3457 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 3458 } 3459 3460 VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); 3461 SmallVector<SDOperand, 4> Ops; 3462 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 3463 // condition is true. 3464 Ops.push_back(Op.getOperand(2)); 3465 Ops.push_back(Op.getOperand(1)); 3466 Ops.push_back(CC); 3467 Ops.push_back(Cond.getValue(1)); 3468 return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); 3469} 3470 3471SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { 3472 bool addTest = true; 3473 SDOperand Chain = Op.getOperand(0); 3474 SDOperand Cond = Op.getOperand(1); 3475 SDOperand Dest = Op.getOperand(2); 3476 SDOperand CC; 3477 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 3478 3479 if (Cond.getOpcode() == ISD::SETCC) 3480 Cond = LowerSETCC(Cond, DAG, Chain); 3481 3482 if (Cond.getOpcode() == X86ISD::SETCC) { 3483 CC = Cond.getOperand(0); 3484 3485 // If condition flag is set by a X86ISD::CMP, then make a copy of it 3486 // (since flag operand cannot be shared). Use it as the condition setting 3487 // operand in place of the X86ISD::SETCC. 3488 // If the X86ISD::SETCC has more than one use, then perhaps it's better 3489 // to use a test instead of duplicating the X86ISD::CMP (for register 3490 // pressure reason)? 3491 SDOperand Cmp = Cond.getOperand(1); 3492 unsigned Opc = Cmp.getOpcode(); 3493 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) { 3494 SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; 3495 Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); 3496 addTest = false; 3497 } 3498 } 3499 3500 if (addTest) { 3501 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 3502 SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; 3503 Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); 3504 } 3505 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), 3506 Cond, Op.getOperand(2), CC, Cond.getValue(1)); 3507} 3508 3509SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 3510 unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3511 3512 if (Subtarget->is64Bit()) 3513 return LowerX86_64CCCCallTo(Op, DAG, CallingConv); 3514 else 3515 switch (CallingConv) { 3516 default: 3517 assert(0 && "Unsupported calling convention"); 3518 case CallingConv::Fast: 3519 // TODO: Implement fastcc 3520 // Falls through 3521 case CallingConv::C: 3522 case CallingConv::X86_StdCall: 3523 return LowerCCCCallTo(Op, DAG, CallingConv); 3524 case CallingConv::X86_FastCall: 3525 return LowerFastCCCallTo(Op, DAG, CallingConv); 3526 } 3527} 3528 3529 3530// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 3531// Calls to _alloca are needed to probe the stack when allocating more than 4k 3532// bytes in one go. 
Touching the stack at 4K increments is necessary to ensure 3533// that the guard pages used by the OS virtual memory manager are allocated in 3534// correct sequence. 3535SDOperand X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 3536 SelectionDAG &DAG) { 3537 assert(Subtarget->isTargetCygMing() && 3538 "This should be used only on Cygwin/Mingw targets"); 3539 3540 // Get the inputs. 3541 SDOperand Chain = Op.getOperand(0); 3542 SDOperand Size = Op.getOperand(1); 3543 // FIXME: Ensure alignment here 3544 3545 TargetLowering::ArgListTy Args; 3546 TargetLowering::ArgListEntry Entry; 3547 MVT::ValueType IntPtr = getPointerTy(); 3548 MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 3549 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 3550 3551 Entry.Node = Size; 3552 Entry.Ty = IntPtrTy; 3553 Entry.isInReg = true; // Should pass in EAX 3554 Args.push_back(Entry); 3555 std::pair<SDOperand, SDOperand> CallResult = 3556 LowerCallTo(Chain, IntPtrTy, false, false, CallingConv::C, false, 3557 DAG.getExternalSymbol("_alloca", IntPtr), Args, DAG); 3558 3559 SDOperand SP = DAG.getCopyFromReg(CallResult.second, X86StackPtr, SPTy); 3560 3561 std::vector<MVT::ValueType> Tys; 3562 Tys.push_back(SPTy); 3563 Tys.push_back(MVT::Other); 3564 SDOperand Ops[2] = { SP, CallResult.second }; 3565 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); 3566} 3567 3568SDOperand 3569X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { 3570 MachineFunction &MF = DAG.getMachineFunction(); 3571 const Function* Fn = MF.getFunction(); 3572 if (Fn->hasExternalLinkage() && 3573 Subtarget->isTargetCygMing() && 3574 Fn->getName() == "main") 3575 MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true); 3576 3577 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 3578 if (Subtarget->is64Bit()) 3579 return LowerX86_64CCCArguments(Op, DAG); 3580 else 3581 switch(CC) { 3582 default: 3583 assert(0 && "Unsupported calling convention"); 3584 case CallingConv::Fast: 3585 // TODO: implement fastcc. 3586 3587 // Falls through 3588 case CallingConv::C: 3589 return LowerCCCArguments(Op, DAG); 3590 case CallingConv::X86_StdCall: 3591 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall); 3592 return LowerCCCArguments(Op, DAG, true); 3593 case CallingConv::X86_FastCall: 3594 MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall); 3595 return LowerFastCCArguments(Op, DAG); 3596 } 3597} 3598 3599SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) { 3600 SDOperand InFlag(0, 0); 3601 SDOperand Chain = Op.getOperand(0); 3602 unsigned Align = 3603 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 3604 if (Align == 0) Align = 1; 3605 3606 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 3607 // If not DWORD aligned, call memset if size is less than the threshold. 3608 // It knows how to align to the right boundary first. 3609 if ((Align & 3) != 0 || 3610 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 3611 MVT::ValueType IntPtr = getPointerTy(); 3612 const Type *IntPtrTy = getTargetData()->getIntPtrType(); 3613 TargetLowering::ArgListTy Args; 3614 TargetLowering::ArgListEntry Entry; 3615 Entry.Node = Op.getOperand(1); 3616 Entry.Ty = IntPtrTy; 3617 Args.push_back(Entry); 3618 // Extend the unsigned i8 argument to be an int value for the call. 
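  // The arguments pushed here follow the C signature memset(dst, value,
  // len): dst was pushed above, the extended value and the length follow.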
3619 Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2)); 3620 Entry.Ty = IntPtrTy; 3621 Args.push_back(Entry); 3622 Entry.Node = Op.getOperand(3); 3623 Args.push_back(Entry); 3624 std::pair<SDOperand,SDOperand> CallResult = 3625 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 3626 DAG.getExternalSymbol("memset", IntPtr), Args, DAG); 3627 return CallResult.second; 3628 } 3629 3630 MVT::ValueType AVT; 3631 SDOperand Count; 3632 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3633 unsigned BytesLeft = 0; 3634 bool TwoRepStos = false; 3635 if (ValC) { 3636 unsigned ValReg; 3637 uint64_t Val = ValC->getValue() & 255; 3638 3639 // If the value is a constant, then we can potentially use larger sets. 3640 switch (Align & 3) { 3641 case 2: // WORD aligned 3642 AVT = MVT::i16; 3643 ValReg = X86::AX; 3644 Val = (Val << 8) | Val; 3645 break; 3646 case 0: // DWORD aligned 3647 AVT = MVT::i32; 3648 ValReg = X86::EAX; 3649 Val = (Val << 8) | Val; 3650 Val = (Val << 16) | Val; 3651 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned 3652 AVT = MVT::i64; 3653 ValReg = X86::RAX; 3654 Val = (Val << 32) | Val; 3655 } 3656 break; 3657 default: // Byte aligned 3658 AVT = MVT::i8; 3659 ValReg = X86::AL; 3660 Count = Op.getOperand(3); 3661 break; 3662 } 3663 3664 if (AVT > MVT::i8) { 3665 if (I) { 3666 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 3667 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 3668 BytesLeft = I->getValue() % UBytes; 3669 } else { 3670 assert(AVT >= MVT::i32 && 3671 "Do not use rep;stos if not at least DWORD aligned"); 3672 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 3673 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 3674 TwoRepStos = true; 3675 } 3676 } 3677 3678 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT), 3679 InFlag); 3680 InFlag = Chain.getValue(1); 3681 } else { 3682 AVT = MVT::i8; 3683 Count = Op.getOperand(3); 3684 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag); 3685 InFlag = Chain.getValue(1); 3686 } 3687 3688 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 3689 Count, InFlag); 3690 InFlag = Chain.getValue(1); 3691 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 3692 Op.getOperand(1), InFlag); 3693 InFlag = Chain.getValue(1); 3694 3695 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3696 SmallVector<SDOperand, 8> Ops; 3697 Ops.push_back(Chain); 3698 Ops.push_back(DAG.getValueType(AVT)); 3699 Ops.push_back(InFlag); 3700 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 3701 3702 if (TwoRepStos) { 3703 InFlag = Chain.getValue(1); 3704 Count = Op.getOperand(3); 3705 MVT::ValueType CVT = Count.getValueType(); 3706 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 3707 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 3708 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 3709 Left, InFlag); 3710 InFlag = Chain.getValue(1); 3711 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3712 Ops.clear(); 3713 Ops.push_back(Chain); 3714 Ops.push_back(DAG.getValueType(MVT::i8)); 3715 Ops.push_back(InFlag); 3716 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size()); 3717 } else if (BytesLeft) { 3718 // Issue stores for the last 1 - 7 bytes. 
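  // For example, a constant 131-byte DWORD-aligned memset (assuming it is
  // large enough to avoid the libcall path above) does 32 dword rep;stos
  // iterations and finishes here with a 2-byte store followed by a 1-byte
  // store.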
3719 SDOperand Value; 3720 unsigned Val = ValC->getValue() & 255; 3721 unsigned Offset = I->getValue() - BytesLeft; 3722 SDOperand DstAddr = Op.getOperand(1); 3723 MVT::ValueType AddrVT = DstAddr.getValueType(); 3724 if (BytesLeft >= 4) { 3725 Val = (Val << 8) | Val; 3726 Val = (Val << 16) | Val; 3727 Value = DAG.getConstant(Val, MVT::i32); 3728 Chain = DAG.getStore(Chain, Value, 3729 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 3730 DAG.getConstant(Offset, AddrVT)), 3731 NULL, 0); 3732 BytesLeft -= 4; 3733 Offset += 4; 3734 } 3735 if (BytesLeft >= 2) { 3736 Value = DAG.getConstant((Val << 8) | Val, MVT::i16); 3737 Chain = DAG.getStore(Chain, Value, 3738 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 3739 DAG.getConstant(Offset, AddrVT)), 3740 NULL, 0); 3741 BytesLeft -= 2; 3742 Offset += 2; 3743 } 3744 if (BytesLeft == 1) { 3745 Value = DAG.getConstant(Val, MVT::i8); 3746 Chain = DAG.getStore(Chain, Value, 3747 DAG.getNode(ISD::ADD, AddrVT, DstAddr, 3748 DAG.getConstant(Offset, AddrVT)), 3749 NULL, 0); 3750 } 3751 } 3752 3753 return Chain; 3754} 3755 3756SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) { 3757 SDOperand Chain = Op.getOperand(0); 3758 unsigned Align = 3759 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue(); 3760 if (Align == 0) Align = 1; 3761 3762 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 3763 // If not DWORD aligned, call memcpy if size is less than the threshold. 3764 // It knows how to align to the right boundary first. 3765 if ((Align & 3) != 0 || 3766 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) { 3767 MVT::ValueType IntPtr = getPointerTy(); 3768 TargetLowering::ArgListTy Args; 3769 TargetLowering::ArgListEntry Entry; 3770 Entry.Ty = getTargetData()->getIntPtrType(); 3771 Entry.Node = Op.getOperand(1); Args.push_back(Entry); 3772 Entry.Node = Op.getOperand(2); Args.push_back(Entry); 3773 Entry.Node = Op.getOperand(3); Args.push_back(Entry); 3774 std::pair<SDOperand,SDOperand> CallResult = 3775 LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false, 3776 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG); 3777 return CallResult.second; 3778 } 3779 3780 MVT::ValueType AVT; 3781 SDOperand Count; 3782 unsigned BytesLeft = 0; 3783 bool TwoRepMovs = false; 3784 switch (Align & 3) { 3785 case 2: // WORD aligned 3786 AVT = MVT::i16; 3787 break; 3788 case 0: // DWORD aligned 3789 AVT = MVT::i32; 3790 if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned 3791 AVT = MVT::i64; 3792 break; 3793 default: // Byte aligned 3794 AVT = MVT::i8; 3795 Count = Op.getOperand(3); 3796 break; 3797 } 3798 3799 if (AVT > MVT::i8) { 3800 if (I) { 3801 unsigned UBytes = MVT::getSizeInBits(AVT) / 8; 3802 Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy()); 3803 BytesLeft = I->getValue() % UBytes; 3804 } else { 3805 assert(AVT >= MVT::i32 && 3806 "Do not use rep;movs if not at least DWORD aligned"); 3807 Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(), 3808 Op.getOperand(3), DAG.getConstant(2, MVT::i8)); 3809 TwoRepMovs = true; 3810 } 3811 } 3812 3813 SDOperand InFlag(0, 0); 3814 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, 3815 Count, InFlag); 3816 InFlag = Chain.getValue(1); 3817 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI, 3818 Op.getOperand(1), InFlag); 3819 InFlag = Chain.getValue(1); 3820 Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? 
X86::RSI : X86::ESI, 3821 Op.getOperand(2), InFlag); 3822 InFlag = Chain.getValue(1); 3823 3824 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3825 SmallVector<SDOperand, 8> Ops; 3826 Ops.push_back(Chain); 3827 Ops.push_back(DAG.getValueType(AVT)); 3828 Ops.push_back(InFlag); 3829 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 3830 3831 if (TwoRepMovs) { 3832 InFlag = Chain.getValue(1); 3833 Count = Op.getOperand(3); 3834 MVT::ValueType CVT = Count.getValueType(); 3835 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, 3836 DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); 3837 Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, 3838 Left, InFlag); 3839 InFlag = Chain.getValue(1); 3840 Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3841 Ops.clear(); 3842 Ops.push_back(Chain); 3843 Ops.push_back(DAG.getValueType(MVT::i8)); 3844 Ops.push_back(InFlag); 3845 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); 3846 } else if (BytesLeft) { 3847 // Issue loads and stores for the last 1 - 7 bytes. 3848 unsigned Offset = I->getValue() - BytesLeft; 3849 SDOperand DstAddr = Op.getOperand(1); 3850 MVT::ValueType DstVT = DstAddr.getValueType(); 3851 SDOperand SrcAddr = Op.getOperand(2); 3852 MVT::ValueType SrcVT = SrcAddr.getValueType(); 3853 SDOperand Value; 3854 if (BytesLeft >= 4) { 3855 Value = DAG.getLoad(MVT::i32, Chain, 3856 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 3857 DAG.getConstant(Offset, SrcVT)), 3858 NULL, 0); 3859 Chain = Value.getValue(1); 3860 Chain = DAG.getStore(Chain, Value, 3861 DAG.getNode(ISD::ADD, DstVT, DstAddr, 3862 DAG.getConstant(Offset, DstVT)), 3863 NULL, 0); 3864 BytesLeft -= 4; 3865 Offset += 4; 3866 } 3867 if (BytesLeft >= 2) { 3868 Value = DAG.getLoad(MVT::i16, Chain, 3869 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 3870 DAG.getConstant(Offset, SrcVT)), 3871 NULL, 0); 3872 Chain = Value.getValue(1); 3873 Chain = DAG.getStore(Chain, Value, 3874 DAG.getNode(ISD::ADD, DstVT, DstAddr, 3875 DAG.getConstant(Offset, DstVT)), 3876 NULL, 0); 3877 BytesLeft -= 2; 3878 Offset += 2; 3879 } 3880 3881 if (BytesLeft == 1) { 3882 Value = DAG.getLoad(MVT::i8, Chain, 3883 DAG.getNode(ISD::ADD, SrcVT, SrcAddr, 3884 DAG.getConstant(Offset, SrcVT)), 3885 NULL, 0); 3886 Chain = Value.getValue(1); 3887 Chain = DAG.getStore(Chain, Value, 3888 DAG.getNode(ISD::ADD, DstVT, DstAddr, 3889 DAG.getConstant(Offset, DstVT)), 3890 NULL, 0); 3891 } 3892 } 3893 3894 return Chain; 3895} 3896 3897SDOperand 3898X86TargetLowering::LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG) { 3899 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); 3900 SDOperand TheOp = Op.getOperand(0); 3901 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1); 3902 if (Subtarget->is64Bit()) { 3903 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); 3904 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX, 3905 MVT::i64, Copy1.getValue(2)); 3906 SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2, 3907 DAG.getConstant(32, MVT::i8)); 3908 SDOperand Ops[] = { 3909 DAG.getNode(ISD::OR, MVT::i64, Copy1, Tmp), Copy2.getValue(1) 3910 }; 3911 3912 Tys = DAG.getVTList(MVT::i64, MVT::Other); 3913 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); 3914 } 3915 3916 SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); 3917 SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::EDX, 3918 MVT::i32, Copy1.getValue(2)); 3919 SDOperand Ops[] = { Copy1, Copy2, Copy2.getValue(1) }; 3920 Tys = DAG.getVTList(MVT::i32, 
MVT::i32, MVT::Other); 3921 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 3); 3922} 3923 3924SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { 3925 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 3926 3927 if (!Subtarget->is64Bit()) { 3928 // vastart just stores the address of the VarArgsFrameIndex slot into the 3929 // memory location argument. 3930 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 3931 return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(), 3932 SV->getOffset()); 3933 } 3934 3935 // __va_list_tag: 3936 // gp_offset (0 - 6 * 8) 3937 // fp_offset (48 - 48 + 8 * 16) 3938 // overflow_arg_area (point to parameters coming in memory). 3939 // reg_save_area 3940 SmallVector<SDOperand, 8> MemOps; 3941 SDOperand FIN = Op.getOperand(1); 3942 // Store gp_offset 3943 SDOperand Store = DAG.getStore(Op.getOperand(0), 3944 DAG.getConstant(VarArgsGPOffset, MVT::i32), 3945 FIN, SV->getValue(), SV->getOffset()); 3946 MemOps.push_back(Store); 3947 3948 // Store fp_offset 3949 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 3950 DAG.getConstant(4, getPointerTy())); 3951 Store = DAG.getStore(Op.getOperand(0), 3952 DAG.getConstant(VarArgsFPOffset, MVT::i32), 3953 FIN, SV->getValue(), SV->getOffset()); 3954 MemOps.push_back(Store); 3955 3956 // Store ptr to overflow_arg_area 3957 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 3958 DAG.getConstant(4, getPointerTy())); 3959 SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); 3960 Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(), 3961 SV->getOffset()); 3962 MemOps.push_back(Store); 3963 3964 // Store ptr to reg_save_area. 3965 FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, 3966 DAG.getConstant(8, getPointerTy())); 3967 SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); 3968 Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(), 3969 SV->getOffset()); 3970 MemOps.push_back(Store); 3971 return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); 3972} 3973 3974SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { 3975 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 3976 SDOperand Chain = Op.getOperand(0); 3977 SDOperand DstPtr = Op.getOperand(1); 3978 SDOperand SrcPtr = Op.getOperand(2); 3979 SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3)); 3980 SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4)); 3981 3982 SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr, 3983 SrcSV->getValue(), SrcSV->getOffset()); 3984 Chain = SrcPtr.getValue(1); 3985 for (unsigned i = 0; i < 3; ++i) { 3986 SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr, 3987 SrcSV->getValue(), SrcSV->getOffset()); 3988 Chain = Val.getValue(1); 3989 Chain = DAG.getStore(Chain, Val, DstPtr, 3990 DstSV->getValue(), DstSV->getOffset()); 3991 if (i == 2) 3992 break; 3993 SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr, 3994 DAG.getConstant(8, getPointerTy())); 3995 DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr, 3996 DAG.getConstant(8, getPointerTy())); 3997 } 3998 return Chain; 3999} 4000 4001SDOperand 4002X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 4003 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 4004 switch (IntNo) { 4005 default: return SDOperand(); // Don't custom lower most intrinsics. 4006 // Comparison intrinsics. 
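    // Each of the (u)comi intrinsics below is lowered to an X86ISD::COMI or
    // X86ISD::UCOMI compare plus an X86ISD::SETCC on the matching condition
    // code, and the i8 result is then any-extended to i32.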
4007 case Intrinsic::x86_sse_comieq_ss: 4008 case Intrinsic::x86_sse_comilt_ss: 4009 case Intrinsic::x86_sse_comile_ss: 4010 case Intrinsic::x86_sse_comigt_ss: 4011 case Intrinsic::x86_sse_comige_ss: 4012 case Intrinsic::x86_sse_comineq_ss: 4013 case Intrinsic::x86_sse_ucomieq_ss: 4014 case Intrinsic::x86_sse_ucomilt_ss: 4015 case Intrinsic::x86_sse_ucomile_ss: 4016 case Intrinsic::x86_sse_ucomigt_ss: 4017 case Intrinsic::x86_sse_ucomige_ss: 4018 case Intrinsic::x86_sse_ucomineq_ss: 4019 case Intrinsic::x86_sse2_comieq_sd: 4020 case Intrinsic::x86_sse2_comilt_sd: 4021 case Intrinsic::x86_sse2_comile_sd: 4022 case Intrinsic::x86_sse2_comigt_sd: 4023 case Intrinsic::x86_sse2_comige_sd: 4024 case Intrinsic::x86_sse2_comineq_sd: 4025 case Intrinsic::x86_sse2_ucomieq_sd: 4026 case Intrinsic::x86_sse2_ucomilt_sd: 4027 case Intrinsic::x86_sse2_ucomile_sd: 4028 case Intrinsic::x86_sse2_ucomigt_sd: 4029 case Intrinsic::x86_sse2_ucomige_sd: 4030 case Intrinsic::x86_sse2_ucomineq_sd: { 4031 unsigned Opc = 0; 4032 ISD::CondCode CC = ISD::SETCC_INVALID; 4033 switch (IntNo) { 4034 default: break; 4035 case Intrinsic::x86_sse_comieq_ss: 4036 case Intrinsic::x86_sse2_comieq_sd: 4037 Opc = X86ISD::COMI; 4038 CC = ISD::SETEQ; 4039 break; 4040 case Intrinsic::x86_sse_comilt_ss: 4041 case Intrinsic::x86_sse2_comilt_sd: 4042 Opc = X86ISD::COMI; 4043 CC = ISD::SETLT; 4044 break; 4045 case Intrinsic::x86_sse_comile_ss: 4046 case Intrinsic::x86_sse2_comile_sd: 4047 Opc = X86ISD::COMI; 4048 CC = ISD::SETLE; 4049 break; 4050 case Intrinsic::x86_sse_comigt_ss: 4051 case Intrinsic::x86_sse2_comigt_sd: 4052 Opc = X86ISD::COMI; 4053 CC = ISD::SETGT; 4054 break; 4055 case Intrinsic::x86_sse_comige_ss: 4056 case Intrinsic::x86_sse2_comige_sd: 4057 Opc = X86ISD::COMI; 4058 CC = ISD::SETGE; 4059 break; 4060 case Intrinsic::x86_sse_comineq_ss: 4061 case Intrinsic::x86_sse2_comineq_sd: 4062 Opc = X86ISD::COMI; 4063 CC = ISD::SETNE; 4064 break; 4065 case Intrinsic::x86_sse_ucomieq_ss: 4066 case Intrinsic::x86_sse2_ucomieq_sd: 4067 Opc = X86ISD::UCOMI; 4068 CC = ISD::SETEQ; 4069 break; 4070 case Intrinsic::x86_sse_ucomilt_ss: 4071 case Intrinsic::x86_sse2_ucomilt_sd: 4072 Opc = X86ISD::UCOMI; 4073 CC = ISD::SETLT; 4074 break; 4075 case Intrinsic::x86_sse_ucomile_ss: 4076 case Intrinsic::x86_sse2_ucomile_sd: 4077 Opc = X86ISD::UCOMI; 4078 CC = ISD::SETLE; 4079 break; 4080 case Intrinsic::x86_sse_ucomigt_ss: 4081 case Intrinsic::x86_sse2_ucomigt_sd: 4082 Opc = X86ISD::UCOMI; 4083 CC = ISD::SETGT; 4084 break; 4085 case Intrinsic::x86_sse_ucomige_ss: 4086 case Intrinsic::x86_sse2_ucomige_sd: 4087 Opc = X86ISD::UCOMI; 4088 CC = ISD::SETGE; 4089 break; 4090 case Intrinsic::x86_sse_ucomineq_ss: 4091 case Intrinsic::x86_sse2_ucomineq_sd: 4092 Opc = X86ISD::UCOMI; 4093 CC = ISD::SETNE; 4094 break; 4095 } 4096 4097 unsigned X86CC; 4098 SDOperand LHS = Op.getOperand(1); 4099 SDOperand RHS = Op.getOperand(2); 4100 translateX86CC(CC, true, X86CC, LHS, RHS, DAG); 4101 4102 const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); 4103 SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS }; 4104 SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3); 4105 VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); 4106 SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; 4107 SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2); 4108 return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); 4109 } 4110 } 4111} 4112 4113SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { 4114 // Depths > 0 not supported 
yet! 4115 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4116 return SDOperand(); 4117 4118 // Just load the return address 4119 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4120 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); 4121} 4122 4123SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { 4124 // Depths > 0 not supported yet! 4125 if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) 4126 return SDOperand(); 4127 4128 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); 4129 return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, 4130 DAG.getConstant(4, getPointerTy())); 4131} 4132 4133/// LowerOperation - Provide custom lowering hooks for some operations. 4134/// 4135SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 4136 switch (Op.getOpcode()) { 4137 default: assert(0 && "Should not custom lower this!"); 4138 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4139 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4140 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4141 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4142 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4143 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4144 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4145 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4146 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 4147 case ISD::SHL_PARTS: 4148 case ISD::SRA_PARTS: 4149 case ISD::SRL_PARTS: return LowerShift(Op, DAG); 4150 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4151 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 4152 case ISD::FABS: return LowerFABS(Op, DAG); 4153 case ISD::FNEG: return LowerFNEG(Op, DAG); 4154 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4155 case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode()); 4156 case ISD::SELECT: return LowerSELECT(Op, DAG); 4157 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 4158 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4159 case ISD::CALL: return LowerCALL(Op, DAG); 4160 case ISD::RET: return LowerRET(Op, DAG); 4161 case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG); 4162 case ISD::MEMSET: return LowerMEMSET(Op, DAG); 4163 case ISD::MEMCPY: return LowerMEMCPY(Op, DAG); 4164 case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG); 4165 case ISD::VASTART: return LowerVASTART(Op, DAG); 4166 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 4167 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4168 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4169 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4170 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 4171 } 4172 return SDOperand(); 4173} 4174 4175const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 4176 switch (Opcode) { 4177 default: return NULL; 4178 case X86ISD::SHLD: return "X86ISD::SHLD"; 4179 case X86ISD::SHRD: return "X86ISD::SHRD"; 4180 case X86ISD::FAND: return "X86ISD::FAND"; 4181 case X86ISD::FOR: return "X86ISD::FOR"; 4182 case X86ISD::FXOR: return "X86ISD::FXOR"; 4183 case X86ISD::FSRL: return "X86ISD::FSRL"; 4184 case X86ISD::FILD: return "X86ISD::FILD"; 4185 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 4186 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 4187 case 
X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 4188 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 4189 case X86ISD::FLD: return "X86ISD::FLD"; 4190 case X86ISD::FST: return "X86ISD::FST"; 4191 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT"; 4192 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT"; 4193 case X86ISD::CALL: return "X86ISD::CALL"; 4194 case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; 4195 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 4196 case X86ISD::CMP: return "X86ISD::CMP"; 4197 case X86ISD::COMI: return "X86ISD::COMI"; 4198 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 4199 case X86ISD::SETCC: return "X86ISD::SETCC"; 4200 case X86ISD::CMOV: return "X86ISD::CMOV"; 4201 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 4202 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 4203 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 4204 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 4205 case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK"; 4206 case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA"; 4207 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 4208 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 4209 case X86ISD::S2VEC: return "X86ISD::S2VEC"; 4210 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 4211 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 4212 case X86ISD::FMAX: return "X86ISD::FMAX"; 4213 case X86ISD::FMIN: return "X86ISD::FMIN"; 4214 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 4215 case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER"; 4216 } 4217} 4218 4219// isLegalAddressingMode - Return true if the addressing mode represented 4220// by AM is legal for this target, for a load/store of the specified type. 4221bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 4222 const Type *Ty) const { 4223 // X86 supports extremely general addressing modes. 4224 4225 // X86 allows a sign-extended 32-bit immediate field as a displacement. 4226 if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1) 4227 return false; 4228 4229 if (AM.BaseGV) { 4230 // X86-64 only supports addr of globals in small code model. 4231 if (Subtarget->is64Bit() && 4232 getTargetMachine().getCodeModel() != CodeModel::Small) 4233 return false; 4234 4235 // We can only fold this if we don't need a load either. 4236 if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false)) 4237 return false; 4238 } 4239 4240 switch (AM.Scale) { 4241 case 0: 4242 case 1: 4243 case 2: 4244 case 4: 4245 case 8: 4246 // These scales always work. 4247 break; 4248 case 3: 4249 case 5: 4250 case 9: 4251 // These scales are formed with basereg+scalereg. Only accept if there is 4252 // no basereg yet. 4253 if (AM.HasBaseReg) 4254 return false; 4255 break; 4256 default: // Other stuff never works. 4257 return false; 4258 } 4259 4260 return true; 4261} 4262 4263 4264/// isShuffleMaskLegal - Targets can use this to indicate that they only 4265/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4266/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4267/// are assumed to be legal. 4268bool 4269X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const { 4270 // Only do shuffles on 128-bit vector types for now. 
4271 if (MVT::getSizeInBits(VT) == 64) return false; 4272 return (Mask.Val->getNumOperands() <= 4 || 4273 isSplatMask(Mask.Val) || 4274 isPSHUFHW_PSHUFLWMask(Mask.Val) || 4275 X86::isUNPCKLMask(Mask.Val) || 4276 X86::isUNPCKL_v_undef_Mask(Mask.Val) || 4277 X86::isUNPCKH_v_undef_Mask(Mask.Val) || 4278 X86::isUNPCKHMask(Mask.Val)); 4279} 4280 4281bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps, 4282 MVT::ValueType EVT, 4283 SelectionDAG &DAG) const { 4284 unsigned NumElts = BVOps.size(); 4285 // Only do shuffles on 128-bit vector types for now. 4286 if (MVT::getSizeInBits(EVT) * NumElts == 64) return false; 4287 if (NumElts == 2) return true; 4288 if (NumElts == 4) { 4289 return (isMOVLMask(&BVOps[0], 4) || 4290 isCommutedMOVL(&BVOps[0], 4, true) || 4291 isSHUFPMask(&BVOps[0], 4) || 4292 isCommutedSHUFP(&BVOps[0], 4)); 4293 } 4294 return false; 4295} 4296 4297//===----------------------------------------------------------------------===// 4298// X86 Scheduler Hooks 4299//===----------------------------------------------------------------------===// 4300 4301MachineBasicBlock * 4302X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 4303 MachineBasicBlock *BB) { 4304 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4305 switch (MI->getOpcode()) { 4306 default: assert(false && "Unexpected instr type to insert"); 4307 case X86::CMOV_FR32: 4308 case X86::CMOV_FR64: 4309 case X86::CMOV_V4F32: 4310 case X86::CMOV_V2F64: 4311 case X86::CMOV_V2I64: { 4312 // To "insert" a SELECT_CC instruction, we actually have to insert the 4313 // diamond control-flow pattern. The incoming instruction knows the 4314 // destination vreg to set, the condition code register to branch on, the 4315 // true/false values to select between, and a branch opcode to use. 4316 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4317 ilist<MachineBasicBlock>::iterator It = BB; 4318 ++It; 4319 4320 // thisMBB: 4321 // ... 4322 // TrueVal = ... 4323 // cmpTY ccX, r1, r2 4324 // bCC copy1MBB 4325 // fallthrough --> copy0MBB 4326 MachineBasicBlock *thisMBB = BB; 4327 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 4328 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 4329 unsigned Opc = 4330 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 4331 BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB); 4332 MachineFunction *F = BB->getParent(); 4333 F->getBasicBlockList().insert(It, copy0MBB); 4334 F->getBasicBlockList().insert(It, sinkMBB); 4335 // Update machine-CFG edges by first adding all successors of the current 4336 // block to the new block which will contain the Phi node for the select. 4337 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 4338 e = BB->succ_end(); i != e; ++i) 4339 sinkMBB->addSuccessor(*i); 4340 // Next, remove all successors of the current block, and add the true 4341 // and fallthrough blocks as its successors. 4342 while(!BB->succ_empty()) 4343 BB->removeSuccessor(BB->succ_begin()); 4344 BB->addSuccessor(copy0MBB); 4345 BB->addSuccessor(sinkMBB); 4346 4347 // copy0MBB: 4348 // %FalseValue = ... 4349 // # fallthrough to sinkMBB 4350 BB = copy0MBB; 4351 4352 // Update machine-CFG edges 4353 BB->addSuccessor(sinkMBB); 4354 4355 // sinkMBB: 4356 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4357 // ... 
4358 BB = sinkMBB; 4359 BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg()) 4360 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4361 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4362 4363 delete MI; // The pseudo instruction is gone now. 4364 return BB; 4365 } 4366 4367 case X86::FP_TO_INT16_IN_MEM: 4368 case X86::FP_TO_INT32_IN_MEM: 4369 case X86::FP_TO_INT64_IN_MEM: { 4370 // Change the floating point control register to use "round towards zero" 4371 // mode when truncating to an integer value. 4372 MachineFunction *F = BB->getParent(); 4373 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2); 4374 addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx); 4375 4376 // Load the old value of the high byte of the control word... 4377 unsigned OldCW = 4378 F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass); 4379 addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx); 4380 4381 // Set the high part to be round to zero... 4382 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx) 4383 .addImm(0xC7F); 4384 4385 // Reload the modified control word now... 4386 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 4387 4388 // Restore the memory image of control word to original value 4389 addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx) 4390 .addReg(OldCW); 4391 4392 // Get the X86 opcode to use. 4393 unsigned Opc; 4394 switch (MI->getOpcode()) { 4395 default: assert(0 && "illegal opcode!"); 4396 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break; 4397 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break; 4398 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break; 4399 } 4400 4401 X86AddressMode AM; 4402 MachineOperand &Op = MI->getOperand(0); 4403 if (Op.isRegister()) { 4404 AM.BaseType = X86AddressMode::RegBase; 4405 AM.Base.Reg = Op.getReg(); 4406 } else { 4407 AM.BaseType = X86AddressMode::FrameIndexBase; 4408 AM.Base.FrameIndex = Op.getFrameIndex(); 4409 } 4410 Op = MI->getOperand(1); 4411 if (Op.isImmediate()) 4412 AM.Scale = Op.getImm(); 4413 Op = MI->getOperand(2); 4414 if (Op.isImmediate()) 4415 AM.IndexReg = Op.getImm(); 4416 Op = MI->getOperand(3); 4417 if (Op.isGlobalAddress()) { 4418 AM.GV = Op.getGlobal(); 4419 } else { 4420 AM.Disp = Op.getImm(); 4421 } 4422 addFullAddress(BuildMI(BB, TII->get(Opc)), AM) 4423 .addReg(MI->getOperand(4).getReg()); 4424 4425 // Reload the original control word now. 4426 addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx); 4427 4428 delete MI; // The pseudo instruction is gone now. 4429 return BB; 4430 } 4431 } 4432} 4433 4434//===----------------------------------------------------------------------===// 4435// X86 Optimization Hooks 4436//===----------------------------------------------------------------------===// 4437 4438void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 4439 uint64_t Mask, 4440 uint64_t &KnownZero, 4441 uint64_t &KnownOne, 4442 unsigned Depth) const { 4443 unsigned Opc = Op.getOpcode(); 4444 assert((Opc >= ISD::BUILTIN_OP_END || 4445 Opc == ISD::INTRINSIC_WO_CHAIN || 4446 Opc == ISD::INTRINSIC_W_CHAIN || 4447 Opc == ISD::INTRINSIC_VOID) && 4448 "Should use MaskedValueIsZero if you don't know whether Op" 4449 " is a target node!"); 4450 4451 KnownZero = KnownOne = 0; // Don't know anything. 
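  // The only target node modeled so far is X86ISD::SETCC, which produces
  // 0 or 1, so every bit above bit 0 is known to be zero.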
4452 switch (Opc) { 4453 default: break; 4454 case X86ISD::SETCC: 4455 KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); 4456 break; 4457 } 4458} 4459 4460/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4461/// element of the result of the vector shuffle. 4462static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) { 4463 MVT::ValueType VT = N->getValueType(0); 4464 SDOperand PermMask = N->getOperand(2); 4465 unsigned NumElems = PermMask.getNumOperands(); 4466 SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1); 4467 i %= NumElems; 4468 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4469 return (i == 0) 4470 ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT)); 4471 } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) { 4472 SDOperand Idx = PermMask.getOperand(i); 4473 if (Idx.getOpcode() == ISD::UNDEF) 4474 return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT)); 4475 return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG); 4476 } 4477 return SDOperand(); 4478} 4479 4480/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 4481/// node is a GlobalAddress + an offset. 4482static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) { 4483 unsigned Opc = N->getOpcode(); 4484 if (Opc == X86ISD::Wrapper) { 4485 if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) { 4486 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 4487 return true; 4488 } 4489 } else if (Opc == ISD::ADD) { 4490 SDOperand N1 = N->getOperand(0); 4491 SDOperand N2 = N->getOperand(1); 4492 if (isGAPlusOffset(N1.Val, GA, Offset)) { 4493 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 4494 if (V) { 4495 Offset += V->getSignExtended(); 4496 return true; 4497 } 4498 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 4499 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 4500 if (V) { 4501 Offset += V->getSignExtended(); 4502 return true; 4503 } 4504 } 4505 } 4506 return false; 4507} 4508 4509/// isConsecutiveLoad - Returns true if N is loading from an address of Base 4510/// + Dist * Size. 
static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
                              MachineFrameInfo *MFI) {
  if (N->getOperand(0).Val != Base->getOperand(0).Val)
    return false;

  SDOperand Loc = N->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != Size) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
  } else {
    GlobalValue *GV1 = NULL;
    GlobalValue *GV2 = NULL;
    int64_t Offset1 = 0;
    int64_t Offset2 = 0;
    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
    if (isGA1 && isGA2 && GV1 == GV2)
      return Offset1 == (Offset2 + Dist*Size);
  }

  return false;
}

static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
                              const X86Subtarget *Subtarget) {
  GlobalValue *GV;
  int64_t Offset;
  if (isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
  else {
    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
    if (BFI < 0)
      // Fixed objects do not specify alignment, however the offsets are known.
      return ((Subtarget->getStackAlignment() % 16) == 0 &&
              (MFI->getObjectOffset(BFI) % 16) == 0);
    else
      return MFI->getObjectAlignment(BFI) >= 16;
  }
  return false;
}


/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT::ValueType VT = N->getValueType(0);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  SDOperand PermMask = N->getOperand(2);
  int NumElems = (int)PermMask.getNumOperands();
  SDNode *Base = NULL;
  for (int i = 0; i < NumElems; ++i) {
    SDOperand Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base) return SDOperand();
    } else {
      SDOperand Arg =
        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
        return SDOperand();
      if (!Base)
        Base = Arg.Val;
      else if (!isConsecutiveLoad(Arg.Val, Base,
                                  i, MVT::getSizeInBits(EVT)/8, MFI))
        return SDOperand();
    }
  }

  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
  if (isAlign16) {
    LoadSDNode *LD = cast<LoadSDNode>(Base);
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset());
  } else {
    // Just use movups, it's shorter.
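    // The base load's operands are reused to build an X86ISD::LOAD_UA
    // (unaligned movups) node, and its v4f32 result is bit-converted back to
    // the shuffle's value type.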
    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
    SmallVector<SDOperand, 3> Ops;
    Ops.push_back(Base->getOperand(0));
    Ops.push_back(Base->getOperand(1));
    Ops.push_back(Base->getOperand(2));
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
  }
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDOperand Cond = N->getOperand(0);

  // If we have SSE2 support, try to form min/max nodes.
  if (Subtarget->hasSSE2() &&
      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
    if (Cond.getOpcode() == ISD::SETCC) {
      // Get the LHS/RHS of the select.
      SDOperand LHS = N->getOperand(1);
      SDOperand RHS = N->getOperand(2);
      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

      unsigned Opcode = 0;
      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
        switch (CC) {
        default: break;
        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
        case ISD::SETLT:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOGT: // (X > Y) ? X : Y -> max
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
        case ISD::SETGE:
          Opcode = X86ISD::FMAX;
          break;
        }
      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
        switch (CC) {
        default: break;
        case ISD::SETOGT: // (X > Y) ? Y : X -> min
        case ISD::SETUGT:
        case ISD::SETGT:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
        case ISD::SETGE:
          Opcode = X86ISD::FMIN;
          break;

        case ISD::SETOLE:  // (X <= Y) ? Y : X -> max
        case ISD::SETULE:
        case ISD::SETLE:
          if (!UnsafeFPMath) break;
          // FALL THROUGH.
        case ISD::SETOLT:  // (X olt/lt Y) ? Y : X -> max
        case ISD::SETLT:
          Opcode = X86ISD::FMAX;
          break;
        }
      }

      if (Opcode)
        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
    }
  }

  return SDOperand();
}


SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE:
    return PerformShuffleCombine(N, DAG, Subtarget);
  case ISD::SELECT:
    return PerformSELECTCombine(N, DAG, Subtarget);
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
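/// The single-letter constraints recognized here ('A', 'r', 'R', 'l', 'q',
/// 'Q', 'x', 'Y') all name register classes; anything else falls back to the
/// generic TargetLowering handling.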
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// isOperandValidForConstraint - Return the specified operand (possibly
/// modified) if the specified SDOperand is valid for the specified target
/// constraint letter, otherwise return null.
SDOperand X86TargetLowering::
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 31)
        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
    }
    return SDOperand(0,0);
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getValue() <= 255)
        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
    }
    return SDOperand(0,0);
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op))
      return DAG.getTargetConstant(CST->getValue(), Op.getValueType());

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getValue();
      } else {
        // Also accept the commuted form (C+GA).
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
        if (C && GA)
          Offset = GA->getOffset()+C->getValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      // If addressing this global requires a load (e.g. in PIC mode), we can't
      // match.
      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
                                         false))
        return SDOperand(0, 0);

      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                      Offset);
      return Op;
    }

    // Otherwise, not valid for this mode.
    return SDOperand(0, 0);
  }
  }
  return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
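    // The cases below spell out the register lists directly: 'A' is the
    // EAX/EDX pair used for double-width results, and 'q'/'Q' are the four
    // a/b/c/d registers at whichever width matches the operand type.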
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i64 && Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR64RegisterClass);
      if (VT == MVT::i32)
        return std::make_pair(0U, X86::GR32RegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      else if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      break;
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::Vector:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
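  // For example, an "{ax}" constraint on an i32 operand comes back from the
  // generic lookup as AX in GR16; the code below rewrites that to EAX in
  // GR32 (and likewise to AL/GR8 or RAX/GR64 for i8 and i64 operands).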
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}