X86ISelLowering.cpp revision 07b7f672a066317ffc8224c61dc3b72ce857f24d
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl);

static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl);

/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  int Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::UNDEF, dl, ResultVT);

  if (isa<ConstantSDNode>(Idx)) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
    // that we can match to VEXTRACTF128.
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                                 VecIdx);

    return Result;
  }

  return SDValue();
}
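// Worked example (illustrative, not from the original code): for a v8i32
// source with IdxVal == 5, ElemsPerChunk = 128/32 = 4 and
// NormalizedIdxVal = ((5 * 32) / 128) * 4 = 4, so the node extracts the
// chunk holding elements 4-7, which is what VEXTRACTF128 with immediate 1
// produces.  Insert128BitVector below uses the same normalization.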
/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl) {
  if (isa<ConstantSDNode>(Idx)) {
    EVT VT = Vec.getValueType();
    assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");

    EVT ElVT = VT.getVectorElementType();
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    EVT ResultVT = Result.getValueType();

    // Insert the relevant 128 bits.
    unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                         VecIdx);
    return Result;
  }

  return SDValue();
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasXMMInt() || Subtarget->hasAVX();
  X86ScalarSSEf32 = Subtarget->hasXMM() || Subtarget->hasAVX();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register-pressure-specific scheduling.
  if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
  }
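  // Illustrative effect of the mappings above (hypothetical IR, not from
  // this file): on 32-bit Windows, a 64-bit division such as
  //   %q = sdiv i64 %a, %b
  // is lowered to a call to _alldiv using the X86_StdCall convention rather
  // than being expanded inline.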
  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
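  // Why two conditions (illustrative): after a ucomiss/ucomisd compare,
  // "equal" sets ZF, but an unordered result sets ZF as well, so OEQ must
  // check ZF == 1 && PF == 0 (sete + setnp) and UNE must check the
  // complement (setne || setp).  Marking the condition codes Expand lets the
  // legalizer emit that two-flag form.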
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  } else if (!UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions.  This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions.  However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86.  Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glue are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
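  // Illustrative example of the two-result CSE described above (hypothetical
  // IR): for
  //   %q = sdiv i32 %a, %b
  //   %r = srem i32 %a, %b
  // both operations expand to the same two-result divide node, CSE merges
  // them, and a single idivl yields the quotient in EAX and the remainder
  // in EDX.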
  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
  setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,  Expand);
  setOperationAction(ISD::FP_ROUND_INREG,    MVT::f32, Expand);
  setOperationAction(ISD::FREM,              MVT::f32, Expand);
  setOperationAction(ISD::FREM,              MVT::f64, Expand);
  setOperationAction(ISD::FREM,              MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_,       MVT::i32, Custom);

  setOperationAction(ISD::CTTZ, MVT::i8,  Custom);
  setOperationAction(ISD::CTLZ, MVT::i8,  Custom);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8,  Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP,            MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8,  Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC,  MVT::i8,  Custom);
  setOperationAction(ISD::SETCC,  MVT::i16, Custom);
  setOperationAction(ISD::SETCC,  MVT::i32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f32, Custom);
  setOperationAction(ISD::SETCC,  MVT::f64, Custom);
  setOperationAction(ISD::SETCC,  MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC,  MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool,     MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,        MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress,    MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,   MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool,   MVT::i64, Custom);
    setOperationAction(ISD::JumpTable,      MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress,  MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress,   MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasXMM())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER,   MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
  setShouldFoldAtomicFences(true);
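  // Illustrative example of the folding above (hypothetical IR): in
  //   fence seq_cst
  //   %old = atomicrmw add i32* %p, i32 1 seq_cst
  //   fence seq_cst
  // the lock-prefixed add already orders the surrounding memory operations,
  // so both fences can be dropped.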
  // Expand certain atomics.
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD,      MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Custom);
  }

  // FIXME: use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG,  MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG,  MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC,
                     (Subtarget->is64Bit() ? MVT::i64 : MVT::i32),
                     (Subtarget->isTargetCOFF()
                      && !Subtarget->isTargetEnvMacho()
                      ? Custom : Expand));

  if (!UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
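    // Bit-level view of the FCOPYSIGN lowering above (illustrative):
    //   copysign(mag, sgn) = (mag & ~SIGNMASK) | (sgn & SIGNMASK)
    // where SIGNMASK for f64 is 0x8000000000000000; the AND and OR halves
    // are exactly what ANDPD and ORPD provide.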
    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else if (!UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!UseSoftFloat) {
    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG,   MVT::f80, Expand);
  setOperationAction(ISD::FLOG2,  MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP,   MVT::f80, Expand);
  setOperationAction(ISD::FEXP2,  MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization).  Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                       (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,
                       (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR,
                       (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR,
                       (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL,    (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA,    (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL,    (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP,   (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2,  (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG,
                       (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE,    (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND,  (MVT::SimpleValueType)VT, Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD,  (MVT::SimpleValueType)VT, Expand);
  }
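  // The net effect of the blanket Expand above (illustrative): any vector
  // operation not explicitly re-enabled later stays expanded or scalarized.
  // For example, FADD on v4f32 is switched back to Legal further below once
  // the subtarget is known to have SSE (hasXMM), while the MMX-sized types
  // handled next are deliberately left expanded.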
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
    // No operations on x86mmx supported; everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8,  Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND,   MVT::v8i8,  Expand);
  setOperationAction(ISD::AND,   MVT::v4i16, Expand);
  setOperationAction(ISD::AND,   MVT::v2i32, Expand);
  setOperationAction(ISD::AND,   MVT::v1i64, Expand);
  setOperationAction(ISD::OR,    MVT::v8i8,  Expand);
  setOperationAction(ISD::OR,    MVT::v4i16, Expand);
  setOperationAction(ISD::OR,    MVT::v2i32, Expand);
  setOperationAction(ISD::OR,    MVT::v1i64, Expand);
  setOperationAction(ISD::XOR,   MVT::v8i8,  Expand);
  setOperationAction(ISD::XOR,   MVT::v4i16, Expand);
  setOperationAction(ISD::XOR,   MVT::v2i32, Expand);
  setOperationAction(ISD::XOR,   MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8,  Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8,  Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!UseSoftFloat && Subtarget->hasXMM()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,  MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,  MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC,             MVT::v4f32, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasXMMInt()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      EVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                         VT.getSimpleVT().SimpleTy, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND,    SVT, Promote);
      AddPromotedToType (ISD::AND,    SVT, MVT::v2i64);
      setOperationAction(ISD::OR,     SVT, Promote);
      AddPromotedToType (ISD::OR,     SVT, MVT::v2i64);
      setOperationAction(ISD::XOR,    SVT, Promote);
      AddPromotedToType (ISD::XOR,    SVT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   SVT, Promote);
      AddPromotedToType (ISD::LOAD,   SVT, MVT::v2i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
    }
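    // Illustrative effect of the promotion loop above: an AND of two v4i32
    // values is bitcast to v2i64, performed as a single 128-bit PAND, and
    // bitcast back, so only one selection pattern per logic op covers all
    // the 128-bit integer types.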
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
  }

  if (Subtarget->hasSSE41() || Subtarget->hasAVX()) {
    setOperationAction(ISD::FFLOOR,     MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,      MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC,     MVT::f32, Legal);
    setOperationAction(ISD::FRINT,      MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR,     MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,      MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC,     MVT::f64, Legal);
    setOperationAction(ISD::FRINT,      MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // Can turn SHL into an integer multiply.
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);
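    // Illustrative example for the note above: a non-uniform constant shift
    // such as x << <2, 3, 4, 5> on v4i32 can be emitted as the multiply
    // x * <4, 8, 16, 32>, which matches SSE4.1's PMULLD; a uniform shift
    // amount would use PSLLD directly.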
    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  if (Subtarget->hasSSE2() || Subtarget->hasAVX()) {
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v8i16, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
  }

  if (Subtarget->hasSSE42() || Subtarget->hasAVX())
    setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);

  if (!UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8,  X86::VR256RegisterClass);
    addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32,  X86::VR256RegisterClass);
    addRegisterClass(MVT::v8f32,  X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64,  X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64,  X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD,  MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB,  MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL,  MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV,  MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG,  MVT::v8f32, Custom);

    setOperationAction(ISD::FADD,  MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB,  MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL,  MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV,  MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG,  MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND,   MVT::v4f32, Legal);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom);

    setOperationAction(ISD::SRL, MVT::v4i64,  Custom);
    setOperationAction(ISD::SRL, MVT::v8i32,  Custom);
    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8,  Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);
    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8,  Custom);

    setOperationAction(ISD::SRA, MVT::v8i32,  Custom);
    setOperationAction(ISD::SRA, MVT::v16i16, Custom);

    setOperationAction(ISD::VSETCC, MVT::v32i8,  Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i32,  Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i64,  Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::ADD, MVT::v4i64,  Custom);
    setOperationAction(ISD::ADD, MVT::v8i32,  Custom);
    setOperationAction(ISD::ADD, MVT::v16i16, Custom);
    setOperationAction(ISD::ADD, MVT::v32i8,  Custom);

    setOperationAction(ISD::SUB, MVT::v4i64,  Custom);
    setOperationAction(ISD::SUB, MVT::v8i32,  Custom);
    setOperationAction(ISD::SUB, MVT::v16i16, Custom);
    setOperationAction(ISD::SUB, MVT::v32i8,  Custom);

    setOperationAction(ISD::MUL, MVT::v4i64,  Custom);
    setOperationAction(ISD::MUL, MVT::v8i32,  Custom);
    setOperationAction(ISD::MUL, MVT::v16i16, Custom);
    // Don't lower v32i8 because there is no 128-bit byte mul.

    // Custom lower several nodes for 256-bit types.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, SVT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR,       SVT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     SVT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT,  SVT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR,   SVT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR,   SVT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND,    SVT, Promote);
      AddPromotedToType (ISD::AND,    SVT, MVT::v4i64);
      setOperationAction(ISD::OR,     SVT, Promote);
      AddPromotedToType (ISD::OR,     SVT, MVT::v4i64);
      setOperationAction(ISD::XOR,    SVT, Promote);
      AddPromotedToType (ISD::XOR,    SVT, MVT::v4i64);
      setOperationAction(ISD::LOAD,   SVT, Promote);
      AddPromotedToType (ISD::LOAD,   SVT, MVT::v4i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType (ISD::SELECT, SVT, MVT::v4i64);
    }
  }

  // SIGN_EXTEND_INREGs are evaluated by the extend type.  Handle the
  // expansion of this type with custom code.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
                       Custom);
  }
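  // A classic expansion for this operation (an assumption about what the
  // custom hook emits, not confirmed here): sign_extend_inreg from 16 bits
  // in a 32-bit lane can be done as (x << 16) followed by an arithmetic
  // shift right by 16, replicating bit 15 across the upper half of each
  // lane.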
1071 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1072 1073 1074 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1075 // handle type legalization for these operations here. 1076 // 1077 // FIXME: We really should do custom legalization for addition and 1078 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1079 // than generic legalization for 64-bit multiplication-with-overflow, though. 1080 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1081 // Add/Sub/Mul with overflow operations are custom lowered. 1082 MVT VT = IntVTs[i]; 1083 setOperationAction(ISD::SADDO, VT, Custom); 1084 setOperationAction(ISD::UADDO, VT, Custom); 1085 setOperationAction(ISD::SSUBO, VT, Custom); 1086 setOperationAction(ISD::USUBO, VT, Custom); 1087 setOperationAction(ISD::SMULO, VT, Custom); 1088 setOperationAction(ISD::UMULO, VT, Custom); 1089 } 1090 1091 // There are no 8-bit 3-address imul/mul instructions 1092 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1093 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1094 1095 if (!Subtarget->is64Bit()) { 1096 // These libcalls are not available in 32-bit. 1097 setLibcallName(RTLIB::SHL_I128, 0); 1098 setLibcallName(RTLIB::SRL_I128, 0); 1099 setLibcallName(RTLIB::SRA_I128, 0); 1100 } 1101 1102 // We have target-specific dag combine patterns for the following nodes: 1103 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1104 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1105 setTargetDAGCombine(ISD::BUILD_VECTOR); 1106 setTargetDAGCombine(ISD::SELECT); 1107 setTargetDAGCombine(ISD::SHL); 1108 setTargetDAGCombine(ISD::SRA); 1109 setTargetDAGCombine(ISD::SRL); 1110 setTargetDAGCombine(ISD::OR); 1111 setTargetDAGCombine(ISD::AND); 1112 setTargetDAGCombine(ISD::ADD); 1113 setTargetDAGCombine(ISD::SUB); 1114 setTargetDAGCombine(ISD::STORE); 1115 setTargetDAGCombine(ISD::ZERO_EXTEND); 1116 setTargetDAGCombine(ISD::SINT_TO_FP); 1117 if (Subtarget->is64Bit()) 1118 setTargetDAGCombine(ISD::MUL); 1119 1120 computeRegisterProperties(); 1121 1122 // On Darwin, -Os means optimize for size without hurting performance, 1123 // do not reduce the limit. 1124 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores 1125 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8; 1126 maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores 1127 maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1128 maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores 1129 maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1130 setPrefLoopAlignment(16); 1131 benefitFromCodePlacementOpt = true; 1132 1133 setPrefFunctionAlignment(4); 1134} 1135 1136 1137MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const { 1138 return MVT::i8; 1139} 1140 1141 1142/// getMaxByValAlign - Helper for getByValTypeAlignment to determine 1143/// the desired ByVal argument alignment. 
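/// (It recurses through array and struct members; the maximum is 16, reached
/// as soon as a 128-bit vector member is encountered.)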
1144 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1145   if (MaxAlign == 16)
1146     return;
1147   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1148     if (VTy->getBitWidth() == 128)
1149       MaxAlign = 16;
1150   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1151     unsigned EltAlign = 0;
1152     getMaxByValAlign(ATy->getElementType(), EltAlign);
1153     if (EltAlign > MaxAlign)
1154       MaxAlign = EltAlign;
1155   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1156     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1157       unsigned EltAlign = 0;
1158       getMaxByValAlign(STy->getElementType(i), EltAlign);
1159       if (EltAlign > MaxAlign)
1160         MaxAlign = EltAlign;
1161       if (MaxAlign == 16)
1162         break;
1163     }
1164   }
1165   return;
1166 }
1167 
1168 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1169 /// function arguments in the caller parameter area. For X86, aggregates
1170 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1171 /// are at 4-byte boundaries.
1172 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1173   if (Subtarget->is64Bit()) {
1174     // Max of 8 and alignment of type.
1175     unsigned TyAlign = TD->getABITypeAlignment(Ty);
1176     if (TyAlign > 8)
1177       return TyAlign;
1178     return 8;
1179   }
1180 
1181   unsigned Align = 4;
1182   if (Subtarget->hasXMM())
1183     getMaxByValAlign(Ty, Align);
1184   return Align;
1185 }
1186 
1187 /// getOptimalMemOpType - Returns the target-specific optimal type for load
1188 /// and store operations as a result of memset, memcpy, and memmove
1189 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1190 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
1191 /// against an alignment requirement, probably because the source does not
1192 /// need to be loaded. If 'NonScalarIntSafe' is true, it is safe to return a
1193 /// non-scalar-integer type, e.g. empty string source, constant, or loaded
1194 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
1195 /// constant so it does not need to be loaded.
1196 /// It returns EVT::Other if the type should be determined using generic
1197 /// target-independent logic.
1198 EVT
1199 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1200                                        unsigned DstAlign, unsigned SrcAlign,
1201                                        bool NonScalarIntSafe,
1202                                        bool MemcpyStrSrc,
1203                                        MachineFunction &MF) const {
1204   // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
1205   // Linux. This is because the stack realignment code can't handle certain
1206   // cases like PR2962. This should be removed when PR2962 is fixed.
1207   const Function *F = MF.getFunction();
1208   if (NonScalarIntSafe &&
1209       !F->hasFnAttr(Attribute::NoImplicitFloat)) {
1210     if (Size >= 16 &&
1211         (Subtarget->isUnalignedMemAccessFast() ||
1212          ((DstAlign == 0 || DstAlign >= 16) &&
1213           (SrcAlign == 0 || SrcAlign >= 16))) &&
1214         Subtarget->getStackAlignment() >= 16) {
1215       if (Subtarget->hasSSE2())
1216         return MVT::v4i32;
1217       if (Subtarget->hasSSE1())
1218         return MVT::v4f32;
1219     } else if (!MemcpyStrSrc && Size >= 8 &&
1220                !Subtarget->is64Bit() &&
1221                Subtarget->getStackAlignment() >= 8 &&
1222                Subtarget->hasXMMInt()) {
1223       // Do not use f64 to lower memcpy if source is string constant. It's
1224       // better to use i32 to avoid the loads.
1226       return MVT::f64;
1227     }
1228   }
1229   if (Subtarget->is64Bit() && Size >= 8)
1230     return MVT::i64;
1231   return MVT::i32;
1232 }
1233 
1234 /// getJumpTableEncoding - Return the entry encoding for a jump table in the
1235 /// current function. The returned value is a member of the
1236 /// MachineJumpTableInfo::JTEntryKind enum.
1237 unsigned X86TargetLowering::getJumpTableEncoding() const {
1238   // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1239   // symbol.
1240   if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1241       Subtarget->isPICStyleGOT())
1242     return MachineJumpTableInfo::EK_Custom32;
1243 
1244   // Otherwise, use the normal jump table encoding heuristics.
1245   return TargetLowering::getJumpTableEncoding();
1246 }
1247 
1248 const MCExpr *
1249 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1250                                              const MachineBasicBlock *MBB,
1251                                              unsigned uid, MCContext &Ctx) const {
1252   assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1253          Subtarget->isPICStyleGOT());
1254   // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1255   // entries.
1256   return MCSymbolRefExpr::Create(MBB->getSymbol(),
1257                                  MCSymbolRefExpr::VK_GOTOFF, Ctx);
1258 }
1259 
1260 /// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1261 /// jumptable.
1262 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1263                                                     SelectionDAG &DAG) const {
1264   if (!Subtarget->is64Bit())
1265     // This doesn't have a DebugLoc associated with it, but is not really the
1266     // same as a Register.
1267     return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
1268   return Table;
1269 }
1270 
1271 /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1272 /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1273 /// MCExpr.
1274 const MCExpr *X86TargetLowering::
1275 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1276                              MCContext &Ctx) const {
1277   // X86-64 uses RIP-relative addressing based on the jump table label.
1278   if (Subtarget->isPICStyleRIPRel())
1279     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1280 
1281   // Otherwise, the reference is relative to the PIC base.
1282   return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1283 }
1284 
1285 // FIXME: Why is this routine here? Move to RegInfo!
1286 std::pair<const TargetRegisterClass*, uint8_t>
1287 X86TargetLowering::findRepresentativeClass(EVT VT) const {
1288   const TargetRegisterClass *RRC = 0;
1289   uint8_t Cost = 1;
1290   switch (VT.getSimpleVT().SimpleTy) {
1291   default:
1292     return TargetLowering::findRepresentativeClass(VT);
1293   case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1294     RRC = (Subtarget->is64Bit()
1295            ?
X86::GR64RegisterClass : X86::GR32RegisterClass); 1296 break; 1297 case MVT::x86mmx: 1298 RRC = X86::VR64RegisterClass; 1299 break; 1300 case MVT::f32: case MVT::f64: 1301 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1302 case MVT::v4f32: case MVT::v2f64: 1303 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1304 case MVT::v4f64: 1305 RRC = X86::VR128RegisterClass; 1306 break; 1307 } 1308 return std::make_pair(RRC, Cost); 1309} 1310 1311bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1312 unsigned &Offset) const { 1313 if (!Subtarget->isTargetLinux()) 1314 return false; 1315 1316 if (Subtarget->is64Bit()) { 1317 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1318 Offset = 0x28; 1319 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1320 AddressSpace = 256; 1321 else 1322 AddressSpace = 257; 1323 } else { 1324 // %gs:0x14 on i386 1325 Offset = 0x14; 1326 AddressSpace = 256; 1327 } 1328 return true; 1329} 1330 1331 1332//===----------------------------------------------------------------------===// 1333// Return Value Calling Convention Implementation 1334//===----------------------------------------------------------------------===// 1335 1336#include "X86GenCallingConv.inc" 1337 1338bool 1339X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1340 MachineFunction &MF, bool isVarArg, 1341 const SmallVectorImpl<ISD::OutputArg> &Outs, 1342 LLVMContext &Context) const { 1343 SmallVector<CCValAssign, 16> RVLocs; 1344 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1345 RVLocs, Context); 1346 return CCInfo.CheckReturn(Outs, RetCC_X86); 1347} 1348 1349SDValue 1350X86TargetLowering::LowerReturn(SDValue Chain, 1351 CallingConv::ID CallConv, bool isVarArg, 1352 const SmallVectorImpl<ISD::OutputArg> &Outs, 1353 const SmallVectorImpl<SDValue> &OutVals, 1354 DebugLoc dl, SelectionDAG &DAG) const { 1355 MachineFunction &MF = DAG.getMachineFunction(); 1356 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1357 1358 SmallVector<CCValAssign, 16> RVLocs; 1359 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1360 RVLocs, *DAG.getContext()); 1361 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1362 1363 // Add the regs to the liveout set for the function. 1364 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1365 for (unsigned i = 0; i != RVLocs.size(); ++i) 1366 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1367 MRI.addLiveOut(RVLocs[i].getLocReg()); 1368 1369 SDValue Flag; 1370 1371 SmallVector<SDValue, 6> RetOps; 1372 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1373 // Operand #1 = Bytes To Pop 1374 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1375 MVT::i16)); 1376 1377 // Copy the result values into the output registers. 1378 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1379 CCValAssign &VA = RVLocs[i]; 1380 assert(VA.isRegLoc() && "Can only return in registers!"); 1381 SDValue ValToCopy = OutVals[i]; 1382 EVT ValVT = ValToCopy.getValueType(); 1383 1384 // If this is x86-64, and we disabled SSE, we can't return FP values, 1385 // or SSE or MMX vectors. 1386 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1387 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1388 (Subtarget->is64Bit() && !Subtarget->hasXMM())) { 1389 report_fatal_error("SSE register return with SSE disabled"); 1390 } 1391 // Likewise we can't return F64 values with SSE1 only. 
gcc does so, but 1392 // llvm-gcc has never done it right and no one has noticed, so this 1393 // should be OK for now. 1394 if (ValVT == MVT::f64 && 1395 (Subtarget->is64Bit() && !Subtarget->hasXMMInt())) 1396 report_fatal_error("SSE2 register return with SSE2 disabled"); 1397 1398 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1399 // the RET instruction and handled by the FP Stackifier. 1400 if (VA.getLocReg() == X86::ST0 || 1401 VA.getLocReg() == X86::ST1) { 1402 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1403 // change the value to the FP stack register class. 1404 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1405 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1406 RetOps.push_back(ValToCopy); 1407 // Don't emit a copytoreg. 1408 continue; 1409 } 1410 1411 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1412 // which is returned in RAX / RDX. 1413 if (Subtarget->is64Bit()) { 1414 if (ValVT == MVT::x86mmx) { 1415 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1416 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1417 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1418 ValToCopy); 1419 // If we don't have SSE2 available, convert to v4f32 so the generated 1420 // register is legal. 1421 if (!Subtarget->hasSSE2()) 1422 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1423 } 1424 } 1425 } 1426 1427 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1428 Flag = Chain.getValue(1); 1429 } 1430 1431 // The x86-64 ABI for returning structs by value requires that we copy 1432 // the sret argument into %rax for the return. We saved the argument into 1433 // a virtual register in the entry block, so now we copy the value out 1434 // and into %rax. 1435 if (Subtarget->is64Bit() && 1436 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1437 MachineFunction &MF = DAG.getMachineFunction(); 1438 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1439 unsigned Reg = FuncInfo->getSRetReturnReg(); 1440 assert(Reg && 1441 "SRetReturnReg should have been set in LowerFormalArguments()."); 1442 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1443 1444 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1445 Flag = Chain.getValue(1); 1446 1447 // RAX now acts like a return value. 1448 MRI.addLiveOut(X86::RAX); 1449 } 1450 1451 RetOps[0] = Chain; // Update chain. 1452 1453 // Add the flag if we have it. 1454 if (Flag.getNode()) 1455 RetOps.push_back(Flag); 1456 1457 return DAG.getNode(X86ISD::RET_FLAG, dl, 1458 MVT::Other, &RetOps[0], RetOps.size()); 1459} 1460 1461bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const { 1462 if (N->getNumValues() != 1) 1463 return false; 1464 if (!N->hasNUsesOfValue(1, 0)) 1465 return false; 1466 1467 SDNode *Copy = *N->use_begin(); 1468 if (Copy->getOpcode() != ISD::CopyToReg && 1469 Copy->getOpcode() != ISD::FP_EXTEND) 1470 return false; 1471 1472 bool HasRet = false; 1473 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1474 UI != UE; ++UI) { 1475 if (UI->getOpcode() != X86ISD::RET_FLAG) 1476 return false; 1477 HasRet = true; 1478 } 1479 1480 return HasRet; 1481} 1482 1483EVT 1484X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1485 ISD::NodeType ExtendKind) const { 1486 MVT ReturnMVT; 1487 // TODO: Is this also valid on 32-bit? 
1488   if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
1489     ReturnMVT = MVT::i8;
1490   else
1491     ReturnMVT = MVT::i32;
1492 
1493   EVT MinVT = getRegisterType(Context, ReturnMVT);
1494   return VT.bitsLT(MinVT) ? MinVT : VT;
1495 }
1496 
1497 /// LowerCallResult - Lower the result values of a call into the
1498 /// appropriate copies out of the appropriate physical registers.
1499 ///
1500 SDValue
1501 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1502                                    CallingConv::ID CallConv, bool isVarArg,
1503                                    const SmallVectorImpl<ISD::InputArg> &Ins,
1504                                    DebugLoc dl, SelectionDAG &DAG,
1505                                    SmallVectorImpl<SDValue> &InVals) const {
1506 
1507   // Assign locations to each value returned by this call.
1508   SmallVector<CCValAssign, 16> RVLocs;
1509   bool Is64Bit = Subtarget->is64Bit();
1510   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1511                  getTargetMachine(), RVLocs, *DAG.getContext());
1512   CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
1513 
1514   // Copy all of the result registers out of their specified physreg.
1515   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1516     CCValAssign &VA = RVLocs[i];
1517     EVT CopyVT = VA.getValVT();
1518 
1519     // If this is x86-64, and we disabled SSE, we can't return FP values
1520     if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
1521         ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasXMM())) {
1522       report_fatal_error("SSE register return with SSE disabled");
1523     }
1524 
1525     SDValue Val;
1526 
1527     // If this is a call to a function that returns an fp value on the floating
1528     // point stack, we must guarantee that the value is popped from the stack, so
1529     // a CopyFromReg is not good enough - the copy instruction may be eliminated
1530     // if the return value is not used. We use the FpPOP_RETVAL instruction
1531     // instead.
1532     if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
1533       // If we prefer to use the value in xmm registers, copy it out as f80 and
1534       // use a truncate to move it from fp stack reg to xmm reg.
1535       if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1536       SDValue Ops[] = { Chain, InFlag };
1537       Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1538                                          MVT::Other, MVT::Glue, Ops, 2), 1);
1539       Val = Chain.getValue(0);
1540 
1541       // Round the f80 to the right size, which also moves it to the appropriate
1542       // xmm register.
1543       if (CopyVT != VA.getValVT())
1544         Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1545                           // This truncation won't change the value.
1546                           DAG.getIntPtrConstant(1));
1547     } else {
1548       Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1549                                  CopyVT, InFlag).getValue(1);
1550       Val = Chain.getValue(0);
1551     }
1552     InFlag = Chain.getValue(2);
1553     InVals.push_back(Val);
1554   }
1555 
1556   return Chain;
1557 }
1558 
1559 
1560 //===----------------------------------------------------------------------===//
1561 //                C & StdCall & Fast Calling Convention implementation
1562 //===----------------------------------------------------------------------===//
1563 //  The StdCall calling convention is the standard for many Windows API
1564 //  routines and the like. It differs from the C calling convention only in
1565 //  that the callee, not the caller, cleans up the stack. Symbols are also
1566 //  decorated in some fancy way :) It doesn't support any vector arguments.
1567 //  For info on the fast calling convention see the Fast Calling Convention
1568 //  (tail call) implementation, LowerX86_32FastCCCallTo.
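//  (For reference, a stdcall symbol is conventionally decorated as _name@N on
//   x86-32, where N is the number of argument bytes the callee pops; e.g.
//   int __stdcall foo(int, int) assembles to _foo@8.)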
1569 1570/// CallIsStructReturn - Determines whether a call uses struct return 1571/// semantics. 1572static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1573 if (Outs.empty()) 1574 return false; 1575 1576 return Outs[0].Flags.isSRet(); 1577} 1578 1579/// ArgsAreStructReturn - Determines whether a function uses struct 1580/// return semantics. 1581static bool 1582ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1583 if (Ins.empty()) 1584 return false; 1585 1586 return Ins[0].Flags.isSRet(); 1587} 1588 1589/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1590/// by "Src" to address "Dst" with size and alignment information specified by 1591/// the specific parameter attribute. The copy will be passed as a byval 1592/// function parameter. 1593static SDValue 1594CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1595 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1596 DebugLoc dl) { 1597 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1598 1599 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1600 /*isVolatile*/false, /*AlwaysInline=*/true, 1601 MachinePointerInfo(), MachinePointerInfo()); 1602} 1603 1604/// IsTailCallConvention - Return true if the calling convention is one that 1605/// supports tail call optimization. 1606static bool IsTailCallConvention(CallingConv::ID CC) { 1607 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1608} 1609 1610bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1611 if (!CI->isTailCall()) 1612 return false; 1613 1614 CallSite CS(CI); 1615 CallingConv::ID CalleeCC = CS.getCallingConv(); 1616 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1617 return false; 1618 1619 return true; 1620} 1621 1622/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1623/// a tailcall target by changing its ABI. 1624static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) { 1625 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1626} 1627 1628SDValue 1629X86TargetLowering::LowerMemArgument(SDValue Chain, 1630 CallingConv::ID CallConv, 1631 const SmallVectorImpl<ISD::InputArg> &Ins, 1632 DebugLoc dl, SelectionDAG &DAG, 1633 const CCValAssign &VA, 1634 MachineFrameInfo *MFI, 1635 unsigned i) const { 1636 // Create the nodes corresponding to a load from this parameter slot. 1637 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1638 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv); 1639 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1640 EVT ValVT; 1641 1642 // If value is passed by pointer we have address passed instead of the value 1643 // itself. 1644 if (VA.getLocInfo() == CCValAssign::Indirect) 1645 ValVT = VA.getLocVT(); 1646 else 1647 ValVT = VA.getValVT(); 1648 1649 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1650 // changed with more analysis. 1651 // In case of tail call optimization mark all arguments mutable. Since they 1652 // could be overwritten by lowering of arguments in case of a tail call. 1653 if (Flags.isByVal()) { 1654 unsigned Bytes = Flags.getByValSize(); 1655 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
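    // For a byval argument the frame index itself is returned below; the
    // aggregate is then used in place, so no load is emitted here.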
1656 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1657 return DAG.getFrameIndex(FI, getPointerTy()); 1658 } else { 1659 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1660 VA.getLocMemOffset(), isImmutable); 1661 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1662 return DAG.getLoad(ValVT, dl, Chain, FIN, 1663 MachinePointerInfo::getFixedStack(FI), 1664 false, false, 0); 1665 } 1666} 1667 1668SDValue 1669X86TargetLowering::LowerFormalArguments(SDValue Chain, 1670 CallingConv::ID CallConv, 1671 bool isVarArg, 1672 const SmallVectorImpl<ISD::InputArg> &Ins, 1673 DebugLoc dl, 1674 SelectionDAG &DAG, 1675 SmallVectorImpl<SDValue> &InVals) 1676 const { 1677 MachineFunction &MF = DAG.getMachineFunction(); 1678 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1679 1680 const Function* Fn = MF.getFunction(); 1681 if (Fn->hasExternalLinkage() && 1682 Subtarget->isTargetCygMing() && 1683 Fn->getName() == "main") 1684 FuncInfo->setForceFramePointer(true); 1685 1686 MachineFrameInfo *MFI = MF.getFrameInfo(); 1687 bool Is64Bit = Subtarget->is64Bit(); 1688 bool IsWin64 = Subtarget->isTargetWin64(); 1689 1690 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1691 "Var args not supported with calling convention fastcc or ghc"); 1692 1693 // Assign locations to all of the incoming arguments. 1694 SmallVector<CCValAssign, 16> ArgLocs; 1695 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1696 ArgLocs, *DAG.getContext()); 1697 1698 // Allocate shadow area for Win64 1699 if (IsWin64) { 1700 CCInfo.AllocateStack(32, 8); 1701 } 1702 1703 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1704 1705 unsigned LastVal = ~0U; 1706 SDValue ArgValue; 1707 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1708 CCValAssign &VA = ArgLocs[i]; 1709 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1710 // places. 1711 assert(VA.getValNo() != LastVal && 1712 "Don't support value assigned to multiple locs yet"); 1713 LastVal = VA.getValNo(); 1714 1715 if (VA.isRegLoc()) { 1716 EVT RegVT = VA.getLocVT(); 1717 TargetRegisterClass *RC = NULL; 1718 if (RegVT == MVT::i32) 1719 RC = X86::GR32RegisterClass; 1720 else if (Is64Bit && RegVT == MVT::i64) 1721 RC = X86::GR64RegisterClass; 1722 else if (RegVT == MVT::f32) 1723 RC = X86::FR32RegisterClass; 1724 else if (RegVT == MVT::f64) 1725 RC = X86::FR64RegisterClass; 1726 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1727 RC = X86::VR256RegisterClass; 1728 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1729 RC = X86::VR128RegisterClass; 1730 else if (RegVT == MVT::x86mmx) 1731 RC = X86::VR64RegisterClass; 1732 else 1733 llvm_unreachable("Unknown argument type!"); 1734 1735 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1736 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1737 1738 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1739 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1740 // right size. 
1741 if (VA.getLocInfo() == CCValAssign::SExt) 1742 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1743 DAG.getValueType(VA.getValVT())); 1744 else if (VA.getLocInfo() == CCValAssign::ZExt) 1745 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1746 DAG.getValueType(VA.getValVT())); 1747 else if (VA.getLocInfo() == CCValAssign::BCvt) 1748 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1749 1750 if (VA.isExtInLoc()) { 1751 // Handle MMX values passed in XMM regs. 1752 if (RegVT.isVector()) { 1753 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1754 ArgValue); 1755 } else 1756 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1757 } 1758 } else { 1759 assert(VA.isMemLoc()); 1760 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1761 } 1762 1763 // If value is passed via pointer - do a load. 1764 if (VA.getLocInfo() == CCValAssign::Indirect) 1765 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1766 MachinePointerInfo(), false, false, 0); 1767 1768 InVals.push_back(ArgValue); 1769 } 1770 1771 // The x86-64 ABI for returning structs by value requires that we copy 1772 // the sret argument into %rax for the return. Save the argument into 1773 // a virtual register so that we can access it from the return points. 1774 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1775 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1776 unsigned Reg = FuncInfo->getSRetReturnReg(); 1777 if (!Reg) { 1778 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1779 FuncInfo->setSRetReturnReg(Reg); 1780 } 1781 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1782 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1783 } 1784 1785 unsigned StackSize = CCInfo.getNextStackOffset(); 1786 // Align stack specially for tail calls. 1787 if (FuncIsMadeTailCallSafe(CallConv)) 1788 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1789 1790 // If the function takes variable number of arguments, make a frame index for 1791 // the start of the first vararg value... for expansion of llvm.va_start. 1792 if (isVarArg) { 1793 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1794 CallConv != CallingConv::X86_ThisCall)) { 1795 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1796 } 1797 if (Is64Bit) { 1798 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1799 1800 // FIXME: We should really autogenerate these arrays 1801 static const unsigned GPR64ArgRegsWin64[] = { 1802 X86::RCX, X86::RDX, X86::R8, X86::R9 1803 }; 1804 static const unsigned GPR64ArgRegs64Bit[] = { 1805 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1806 }; 1807 static const unsigned XMMArgRegs64Bit[] = { 1808 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1809 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1810 }; 1811 const unsigned *GPR64ArgRegs; 1812 unsigned NumXMMRegs = 0; 1813 1814 if (IsWin64) { 1815 // The XMM registers which might contain var arg parameters are shadowed 1816 // in their paired GPR. So we only need to save the GPR to their home 1817 // slots. 
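      // (Win64 passes the first four arguments in RCX, RDX, R8 and R9, with
      // XMM0-XMM3 shadowing those same four slots - see GPR64ArgRegsWin64
      // above - so no separate XMM save area is required.)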
1818       TotalNumIntRegs = 4;
1819       GPR64ArgRegs = GPR64ArgRegsWin64;
1820     } else {
1821       TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
1822       GPR64ArgRegs = GPR64ArgRegs64Bit;
1823 
1824       NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs);
1825     }
1826     unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
1827                                                      TotalNumIntRegs);
1828 
1829     bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
1830     assert(!(NumXMMRegs && !Subtarget->hasXMM()) &&
1831            "SSE register cannot be used when SSE is disabled!");
1832     assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) &&
1833            "SSE register cannot be used when SSE is disabled!");
1834     if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM())
1835       // Kernel mode asks for SSE to be disabled, so don't push them
1836       // on the stack.
1837       TotalNumXMMRegs = 0;
1838 
1839     if (IsWin64) {
1840       const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering();
1841       // Get to the caller-allocated home save location. Add 8 to account
1842       // for the return address.
1843       int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
1844       FuncInfo->setRegSaveFrameIndex(
1845           MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
1846       // Fixup to set vararg frame on shadow area (4 x i64).
1847       if (NumIntRegs < 4)
1848         FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
1849     } else {
1850       // For X86-64, if there are vararg parameters that are passed via
1851       // registers, then we must store them to their spots on the stack so they
1852       // may be loaded by dereferencing the result of va_next.
1853       FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
1854       FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
1855       FuncInfo->setRegSaveFrameIndex(
1856           MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
1857                                  false));
1858     }
1859 
1860     // Store the integer parameter registers.
1861     SmallVector<SDValue, 8> MemOps;
1862     SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
1863                                       getPointerTy());
1864     unsigned Offset = FuncInfo->getVarArgsGPOffset();
1865     for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
1866       SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
1867                                 DAG.getIntPtrConstant(Offset));
1868       unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
1869                                    X86::GR64RegisterClass);
1870       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
1871       SDValue Store =
1872           DAG.getStore(Val.getValue(1), dl, Val, FIN,
1873                        MachinePointerInfo::getFixedStack(
1874                            FuncInfo->getRegSaveFrameIndex(), Offset),
1875                        false, false, 0);
1876       MemOps.push_back(Store);
1877       Offset += 8;
1878     }
1879 
1880     if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) {
1881       // Now store the XMM (fp + vector) parameter registers.
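      // (VASTART_SAVE_XMM_REGS is a pseudo that, when expanded, appears to
      // test AL and skip the XMM spills entirely when no vector arguments
      // were passed; AL carries the XMM count per the AMD64 varargs ABI.)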
1882 SmallVector<SDValue, 11> SaveXMMOps; 1883 SaveXMMOps.push_back(Chain); 1884 1885 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 1886 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 1887 SaveXMMOps.push_back(ALVal); 1888 1889 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1890 FuncInfo->getRegSaveFrameIndex())); 1891 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1892 FuncInfo->getVarArgsFPOffset())); 1893 1894 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 1895 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 1896 X86::VR128RegisterClass); 1897 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 1898 SaveXMMOps.push_back(Val); 1899 } 1900 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 1901 MVT::Other, 1902 &SaveXMMOps[0], SaveXMMOps.size())); 1903 } 1904 1905 if (!MemOps.empty()) 1906 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1907 &MemOps[0], MemOps.size()); 1908 } 1909 } 1910 1911 // Some CCs need callee pop. 1912 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) { 1913 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 1914 } else { 1915 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 1916 // If this is an sret function, the return should pop the hidden pointer. 1917 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins)) 1918 FuncInfo->setBytesToPopOnReturn(4); 1919 } 1920 1921 if (!Is64Bit) { 1922 // RegSaveFrameIndex is X86-64 only. 1923 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 1924 if (CallConv == CallingConv::X86_FastCall || 1925 CallConv == CallingConv::X86_ThisCall) 1926 // fastcc functions can't have varargs. 1927 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 1928 } 1929 1930 return Chain; 1931} 1932 1933SDValue 1934X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 1935 SDValue StackPtr, SDValue Arg, 1936 DebugLoc dl, SelectionDAG &DAG, 1937 const CCValAssign &VA, 1938 ISD::ArgFlagsTy Flags) const { 1939 unsigned LocMemOffset = VA.getLocMemOffset(); 1940 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1941 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1942 if (Flags.isByVal()) 1943 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 1944 1945 return DAG.getStore(Chain, dl, Arg, PtrOff, 1946 MachinePointerInfo::getStack(LocMemOffset), 1947 false, false, 0); 1948} 1949 1950/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 1951/// optimization is performed and it is required. 1952SDValue 1953X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 1954 SDValue &OutRetAddr, SDValue Chain, 1955 bool IsTailCall, bool Is64Bit, 1956 int FPDiff, DebugLoc dl) const { 1957 // Adjust the Return address stack slot. 1958 EVT VT = getPointerTy(); 1959 OutRetAddr = getReturnAddressFrameIndex(DAG); 1960 1961 // Load the "old" Return address. 1962 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 1963 false, false, 0); 1964 return SDValue(OutRetAddr.getNode(), 1); 1965} 1966 1967/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 1968/// optimization is performed and it is required (FPDiff!=0). 1969static SDValue 1970EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 1971 SDValue Chain, SDValue RetAddrFrIdx, 1972 bool Is64Bit, int FPDiff, DebugLoc dl) { 1973 // Store the return address to the appropriate stack slot. 
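  // FPDiff is the difference between the caller's and the callee's argument
  // areas (see LowerCall); when it is zero the return address is already in
  // the right slot and nothing needs to be stored.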
1974   if (!FPDiff) return Chain;
1975   // Calculate the new stack slot for the return address.
1976   int SlotSize = Is64Bit ? 8 : 4;
1977   int NewReturnAddrFI =
1978       MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
1979   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
1980   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1981   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
1982                        MachinePointerInfo::getFixedStack(NewReturnAddrFI),
1983                        false, false, 0);
1984   return Chain;
1985 }
1986 
1987 SDValue
1988 X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
1989                              CallingConv::ID CallConv, bool isVarArg,
1990                              bool &isTailCall,
1991                              const SmallVectorImpl<ISD::OutputArg> &Outs,
1992                              const SmallVectorImpl<SDValue> &OutVals,
1993                              const SmallVectorImpl<ISD::InputArg> &Ins,
1994                              DebugLoc dl, SelectionDAG &DAG,
1995                              SmallVectorImpl<SDValue> &InVals) const {
1996   MachineFunction &MF = DAG.getMachineFunction();
1997   bool Is64Bit = Subtarget->is64Bit();
1998   bool IsWin64 = Subtarget->isTargetWin64();
1999   bool IsStructRet = CallIsStructReturn(Outs);
2000   bool IsSibcall = false;
2001 
2002   if (isTailCall) {
2003     // Check if it's really possible to do a tail call.
2004     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2005                     isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
2006                     Outs, OutVals, Ins, DAG);
2007 
2008     // Sibcalls are automatically detected tailcalls which do not require
2009     // ABI changes.
2010     if (!GuaranteedTailCallOpt && isTailCall)
2011       IsSibcall = true;
2012 
2013     if (isTailCall)
2014       ++NumTailCalls;
2015   }
2016 
2017   assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2018          "Var args not supported with calling convention fastcc or ghc");
2019 
2020   // Analyze operands of the call, assigning locations to each operand.
2021   SmallVector<CCValAssign, 16> ArgLocs;
2022   CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
2023                  ArgLocs, *DAG.getContext());
2024 
2025   // Allocate shadow area for Win64
2026   if (IsWin64) {
2027     CCInfo.AllocateStack(32, 8);
2028   }
2029 
2030   CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2031 
2032   // Get a count of how many bytes are to be pushed on the stack.
2033   unsigned NumBytes = CCInfo.getNextStackOffset();
2034   if (IsSibcall)
2035     // This is a sibcall. The memory operands are already available in the
2036     // caller's own stack frame.
2037     NumBytes = 0;
2038   else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv))
2039     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2040 
2041   int FPDiff = 0;
2042   if (isTailCall && !IsSibcall) {
2043     // Lower arguments at fp - stackoffset + fpdiff.
2044     unsigned NumBytesCallerPushed =
2045         MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
2046     FPDiff = NumBytesCallerPushed - NumBytes;
2047 
2048     // Record the delta for moving the return-address stack slot, keeping the
2049     // largest movement (the most negative FPDiff) seen so far.
2050     if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
2051       MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
2052   }
2053 
2054   if (!IsSibcall)
2055     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2056 
2057   SDValue RetAddrFrIdx;
2058   // Load return address for tail calls.
2059   if (isTailCall && FPDiff)
2060     Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2061                                     Is64Bit, FPDiff, dl);
2062 
2063   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2064   SmallVector<SDValue, 8> MemOpChains;
2065   SDValue StackPtr;
2066 
2067   // Walk the register/memloc assignments, inserting copies/loads. In the case
2068   // of tail call optimization, arguments are handled later.
2069   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2070     CCValAssign &VA = ArgLocs[i];
2071     EVT RegVT = VA.getLocVT();
2072     SDValue Arg = OutVals[i];
2073     ISD::ArgFlagsTy Flags = Outs[i].Flags;
2074     bool isByVal = Flags.isByVal();
2075 
2076     // Promote the value if needed.
2077     switch (VA.getLocInfo()) {
2078     default: llvm_unreachable("Unknown loc info!");
2079     case CCValAssign::Full: break;
2080     case CCValAssign::SExt:
2081       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2082       break;
2083     case CCValAssign::ZExt:
2084       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2085       break;
2086     case CCValAssign::AExt:
2087       if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
2088         // Special case: passing MMX values in XMM registers.
2089         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2090         Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2091         Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2092       } else
2093         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2094       break;
2095     case CCValAssign::BCvt:
2096       Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2097       break;
2098     case CCValAssign::Indirect: {
2099       // Store the argument.
2100       SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2101       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2102       Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2103                            MachinePointerInfo::getFixedStack(FI),
2104                            false, false, 0);
2105       Arg = SpillSlot;
2106       break;
2107     }
2108     }
2109 
2110     if (VA.isRegLoc()) {
2111       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2112       if (isVarArg && IsWin64) {
2113         // The Win64 ABI requires an argument XMM reg to be copied to the
2114         // corresponding shadow reg if the callee is a varargs function.
2115         unsigned ShadowReg = 0;
2116         switch (VA.getLocReg()) {
2117         case X86::XMM0: ShadowReg = X86::RCX; break;
2118         case X86::XMM1: ShadowReg = X86::RDX; break;
2119         case X86::XMM2: ShadowReg = X86::R8; break;
2120         case X86::XMM3: ShadowReg = X86::R9; break;
2121         }
2122         if (ShadowReg)
2123           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2124       }
2125     } else if (!IsSibcall && (!isTailCall || isByVal)) {
2126       assert(VA.isMemLoc());
2127       if (StackPtr.getNode() == 0)
2128         StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
2129       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2130                                              dl, DAG, VA, Flags));
2131     }
2132   }
2133 
2134   if (!MemOpChains.empty())
2135     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2136                         &MemOpChains[0], MemOpChains.size());
2137 
2138   // Build a sequence of copy-to-reg nodes chained together with token chain
2139   // and flag operands which copy the outgoing args into registers.
2140   SDValue InFlag;
2141   // Tail call byval lowering might overwrite argument registers so in case of
2142   // tail call optimization the copies to registers are lowered later.
2143   if (!isTailCall)
2144     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2145       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2146                                RegsToPass[i].second, InFlag);
2147       InFlag = Chain.getValue(1);
2148     }
2149 
2150   if (Subtarget->isPICStyleGOT()) {
2151     // ELF / PIC requires the GOT pointer to be in the EBX register before
2152     // function calls made via the PLT.
2153     if (!isTailCall) {
2154       Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
2155                                DAG.getNode(X86ISD::GlobalBaseReg,
2156                                            DebugLoc(), getPointerTy()),
2157                                InFlag);
2158       InFlag = Chain.getValue(1);
2159     } else {
2160       // If we are tail calling and generating PIC/GOT style code, load the
2161       // address of the callee into ECX. The value in ECX is used as the target
2162       // of the tail jump. This is done to circumvent the ebx/callee-saved
2163       // problem for tail calls on PIC/GOT architectures. Normally we would
2164       // just put the address of GOT into ebx and then call target@PLT. But for
2165       // tail calls ebx would be restored (since ebx is callee saved) before
2166       // jumping to the target@PLT.
2167 
2168       // Note: The actual moving to ECX is done further down.
2169       GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2170       if (G && !G->getGlobal()->hasHiddenVisibility() &&
2171           !G->getGlobal()->hasProtectedVisibility())
2172         Callee = LowerGlobalAddress(Callee, DAG);
2173       else if (isa<ExternalSymbolSDNode>(Callee))
2174         Callee = LowerExternalSymbol(Callee, DAG);
2175     }
2176   }
2177 
2178   if (Is64Bit && isVarArg && !IsWin64) {
2179     // From the AMD64 ABI document:
2180     // For calls that may call functions that use varargs or stdargs
2181     // (prototype-less calls or calls to functions containing ellipsis (...) in
2182     // the declaration) %al is used as a hidden argument to specify the number
2183     // of SSE registers used. The contents of %al do not need to match exactly
2184     // the number of registers, but must be an upper bound on the number of SSE
2185     // registers used and must be in the range 0 - 8 inclusive.
2186 
2187     // Count the number of XMM registers allocated.
2188     static const unsigned XMMArgRegs[] = {
2189       X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2190       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2191     };
2192     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2193     assert((Subtarget->hasXMM() || !NumXMMRegs)
2194            && "SSE registers cannot be used when SSE is disabled");
2195 
2196     Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
2197                              DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
2198     InFlag = Chain.getValue(1);
2199   }
2200 
2201 
2202   // For tail calls lower the arguments to the 'real' stack slot.
2203   if (isTailCall) {
2204     // Force all the incoming stack arguments to be loaded from the stack
2205     // before any new outgoing arguments are stored to the stack, because the
2206     // outgoing stack slots may alias the incoming argument stack slots, and
2207     // the alias isn't otherwise explicit. This is slightly more conservative
2208     // than necessary, because it means that each store effectively depends
2209     // on every argument instead of just those arguments it would clobber.
2210     SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
2211 
2212     SmallVector<SDValue, 8> MemOpChains2;
2213     SDValue FIN;
2214     int FI = 0;
2215     // Do not flag preceding copytoreg stuff together with the following stuff.
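    // (Resetting InFlag to an empty SDValue breaks the glue chain here, so the
    // stores below are not artificially ordered after the copies above.)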
2216     InFlag = SDValue();
2217     if (GuaranteedTailCallOpt) {
2218       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2219         CCValAssign &VA = ArgLocs[i];
2220         if (VA.isRegLoc())
2221           continue;
2222         assert(VA.isMemLoc());
2223         SDValue Arg = OutVals[i];
2224         ISD::ArgFlagsTy Flags = Outs[i].Flags;
2225         // Create frame index.
2226         int32_t Offset = VA.getLocMemOffset()+FPDiff;
2227         uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
2228         FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
2229         FIN = DAG.getFrameIndex(FI, getPointerTy());
2230 
2231         if (Flags.isByVal()) {
2232           // Copy relative to framepointer.
2233           SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
2234           if (StackPtr.getNode() == 0)
2235             StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
2236                                           getPointerTy());
2237           Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
2238 
2239           MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
2240                                                            ArgChain,
2241                                                            Flags, DAG, dl));
2242         } else {
2243           // Store relative to framepointer.
2244           MemOpChains2.push_back(
2245               DAG.getStore(ArgChain, dl, Arg, FIN,
2246                            MachinePointerInfo::getFixedStack(FI),
2247                            false, false, 0));
2248         }
2249       }
2250     }
2251 
2252     if (!MemOpChains2.empty())
2253       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2254                           &MemOpChains2[0], MemOpChains2.size());
2255 
2256     // Copy arguments to their registers.
2257     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2258       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2259                                RegsToPass[i].second, InFlag);
2260       InFlag = Chain.getValue(1);
2261     }
2262     InFlag = SDValue();
2263 
2264     // Store the return address to the appropriate stack slot.
2265     Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
2266                                      FPDiff, dl);
2267   }
2268 
2269   if (getTargetMachine().getCodeModel() == CodeModel::Large) {
2270     assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
2271     // In the 64-bit large code model, we have to make all calls
2272     // through a register, since the call instruction's 32-bit
2273     // pc-relative offset may not be large enough to hold the whole
2274     // address.
2275   } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2276     // If the callee is a GlobalAddress node (quite common, every direct call
2277     // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
2278     // it.
2279 
2280     // We should use an extra load for direct calls to dllimported functions in
2281     // non-JIT mode.
2282     const GlobalValue *GV = G->getGlobal();
2283     if (!GV->hasDLLImportLinkage()) {
2284       unsigned char OpFlags = 0;
2285       bool ExtraLoad = false;
2286       unsigned WrapperKind = ISD::DELETED_NODE;
2287 
2288       // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2289       // external symbols must go through the PLT in PIC mode. If the symbol
2290       // has hidden or protected visibility, or if it is static or local, then
2291       // we don't need to use the PLT - we can directly call it.
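      // (Roughly: MO_PLT emits callee@PLT, MO_DARWIN_STUB routes the call
      // through a lazily-bound $stub, and MO_GOTPCREL first loads the callee's
      // address from the GOT, as set up in the cases below.)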
2292 if (Subtarget->isTargetELF() && 2293 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2294 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2295 OpFlags = X86II::MO_PLT; 2296 } else if (Subtarget->isPICStyleStubAny() && 2297 (GV->isDeclaration() || GV->isWeakForLinker()) && 2298 (!Subtarget->getTargetTriple().isMacOSX() || 2299 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2300 // PC-relative references to external symbols should go through $stub, 2301 // unless we're building with the leopard linker or later, which 2302 // automatically synthesizes these stubs. 2303 OpFlags = X86II::MO_DARWIN_STUB; 2304 } else if (Subtarget->isPICStyleRIPRel() && 2305 isa<Function>(GV) && 2306 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) { 2307 // If the function is marked as non-lazy, generate an indirect call 2308 // which loads from the GOT directly. This avoids runtime overhead 2309 // at the cost of eager binding (and one extra byte of encoding). 2310 OpFlags = X86II::MO_GOTPCREL; 2311 WrapperKind = X86ISD::WrapperRIP; 2312 ExtraLoad = true; 2313 } 2314 2315 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2316 G->getOffset(), OpFlags); 2317 2318 // Add a wrapper if needed. 2319 if (WrapperKind != ISD::DELETED_NODE) 2320 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2321 // Add extra indirection if needed. 2322 if (ExtraLoad) 2323 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2324 MachinePointerInfo::getGOT(), 2325 false, false, 0); 2326 } 2327 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2328 unsigned char OpFlags = 0; 2329 2330 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2331 // external symbols should go through the PLT. 2332 if (Subtarget->isTargetELF() && 2333 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2334 OpFlags = X86II::MO_PLT; 2335 } else if (Subtarget->isPICStyleStubAny() && 2336 (!Subtarget->getTargetTriple().isMacOSX() || 2337 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2338 // PC-relative references to external symbols should go through $stub, 2339 // unless we're building with the leopard linker or later, which 2340 // automatically synthesizes these stubs. 2341 OpFlags = X86II::MO_DARWIN_STUB; 2342 } 2343 2344 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2345 OpFlags); 2346 } 2347 2348 // Returns a chain & a flag for retval copy to use. 2349 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2350 SmallVector<SDValue, 8> Ops; 2351 2352 if (!IsSibcall && isTailCall) { 2353 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2354 DAG.getIntPtrConstant(0, true), InFlag); 2355 InFlag = Chain.getValue(1); 2356 } 2357 2358 Ops.push_back(Chain); 2359 Ops.push_back(Callee); 2360 2361 if (isTailCall) 2362 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2363 2364 // Add argument registers to the end of the list so that they are known live 2365 // into the call. 2366 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2367 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2368 RegsToPass[i].second.getValueType())); 2369 2370 // Add an implicit use GOT pointer in EBX. 2371 if (!isTailCall && Subtarget->isPICStyleGOT()) 2372 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2373 2374 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. 
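  // (AL was loaded above with the number of XMM registers used; listing it as
  // an operand keeps that copy live into the call, per the AMD64 varargs
  // convention.)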
2375   if (Is64Bit && isVarArg && !IsWin64)
2376     Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
2377 
2378   if (InFlag.getNode())
2379     Ops.push_back(InFlag);
2380 
2381   if (isTailCall) {
2382     // We used to do:
2383     //// If this is the first return lowered for this function, add the regs
2384     //// to the liveout set for the function.
2385     // This isn't right, although it's probably harmless on x86; liveouts
2386     // should be computed from returns, not tail calls. Consider a void
2387     // function making a tail call to a function returning int.
2388     return DAG.getNode(X86ISD::TC_RETURN, dl,
2389                        NodeTys, &Ops[0], Ops.size());
2390   }
2391 
2392   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
2393   InFlag = Chain.getValue(1);
2394 
2395   // Create the CALLSEQ_END node.
2396   unsigned NumBytesForCalleeToPush;
2397   if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt))
2398     NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
2399   else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
2400     // If this is a call to a struct-return function, the callee
2401     // pops the hidden struct pointer, so we have to push it back.
2402     // This is common for Darwin/X86, Linux & Mingw32 targets.
2403     NumBytesForCalleeToPush = 4;
2404   else
2405     NumBytesForCalleeToPush = 0;  // Callee pops nothing.
2406 
2407   // Returns a flag for retval copy to use.
2408   if (!IsSibcall) {
2409     Chain = DAG.getCALLSEQ_END(Chain,
2410                                DAG.getIntPtrConstant(NumBytes, true),
2411                                DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2412                                                      true),
2413                                InFlag);
2414     InFlag = Chain.getValue(1);
2415   }
2416 
2417   // Handle result values, copying them out of physregs into vregs that we
2418   // return.
2419   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2420                          Ins, dl, DAG, InVals);
2421 }
2422 
2423 
2424 //===----------------------------------------------------------------------===//
2425 //                Fast Calling Convention (tail call) implementation
2426 //===----------------------------------------------------------------------===//
2427 
2428 //  Like the stdcall convention, the callee cleans up the arguments, except
2429 //  that ECX is reserved for storing the address of the tail-called function.
2430 //  Only 2 registers are free for argument passing (inreg). Tail call
2431 //  optimization is performed provided:
2432 //  * tailcallopt is enabled
2433 //  * caller/callee are fastcc
2434 //  On the X86_64 architecture with GOT-style position independent code, only
2435 //  local (within-module) calls are supported at the moment.
2436 //  To keep the stack aligned according to the platform ABI, the function
2437 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
2438 //  multiple of the stack alignment. (Dynamic linkers need this - darwin's
2439 //  dyld, for example.)
2440 //  If a tail-called function (callee) has more arguments than the caller, the
2441 //  caller needs to make sure that there is room to move the RETADDR to. This is
2442 //  achieved by reserving an area the size of the argument delta right after the
2443 //  original RETADDR, but before the saved framepointer or the spilled registers,
2444 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
2445 //  stack layout:
2446 //    arg1
2447 //    arg2
2448 //    RETADDR
2449 //    [ new RETADDR
2450 //      move area ]
2451 //    (possible EBP)
2452 //    ESI
2453 //    EDI
2454 //    local1 ..
2455 
2456 /// GetAlignedArgumentStackSize - Round the stack size up so that, together
/// with the return-address slot, it satisfies the stack alignment; e.g. up to
/// 16n + 12 for a 16-byte alignment requirement with 4-byte slots.
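/// For example, on x86-32 with a 16-byte stack alignment and 4-byte slots, a
/// StackSize of 20 is rounded up to 28 (16n + 12): pushing the 4-byte return
/// address then leaves the stack pointer 16-byte aligned at the call.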
2457unsigned 2458X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2459 SelectionDAG& DAG) const { 2460 MachineFunction &MF = DAG.getMachineFunction(); 2461 const TargetMachine &TM = MF.getTarget(); 2462 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 2463 unsigned StackAlignment = TFI.getStackAlignment(); 2464 uint64_t AlignMask = StackAlignment - 1; 2465 int64_t Offset = StackSize; 2466 uint64_t SlotSize = TD->getPointerSize(); 2467 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2468 // Number smaller than 12 so just add the difference. 2469 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2470 } else { 2471 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2472 Offset = ((~AlignMask) & Offset) + StackAlignment + 2473 (StackAlignment-SlotSize); 2474 } 2475 return Offset; 2476} 2477 2478/// MatchingStackOffset - Return true if the given stack call argument is 2479/// already available in the same position (relatively) of the caller's 2480/// incoming argument stack. 2481static 2482bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2483 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2484 const X86InstrInfo *TII) { 2485 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2486 int FI = INT_MAX; 2487 if (Arg.getOpcode() == ISD::CopyFromReg) { 2488 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2489 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2490 return false; 2491 MachineInstr *Def = MRI->getVRegDef(VR); 2492 if (!Def) 2493 return false; 2494 if (!Flags.isByVal()) { 2495 if (!TII->isLoadFromStackSlot(Def, FI)) 2496 return false; 2497 } else { 2498 unsigned Opcode = Def->getOpcode(); 2499 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2500 Def->getOperand(1).isFI()) { 2501 FI = Def->getOperand(1).getIndex(); 2502 Bytes = Flags.getByValSize(); 2503 } else 2504 return false; 2505 } 2506 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2507 if (Flags.isByVal()) 2508 // ByVal argument is passed in as a pointer but it's now being 2509 // dereferenced. e.g. 2510 // define @foo(%struct.X* %A) { 2511 // tail call @bar(%struct.X* byval %A) 2512 // } 2513 return false; 2514 SDValue Ptr = Ld->getBasePtr(); 2515 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2516 if (!FINode) 2517 return false; 2518 FI = FINode->getIndex(); 2519 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2520 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2521 FI = FINode->getIndex(); 2522 Bytes = Flags.getByValSize(); 2523 } else 2524 return false; 2525 2526 assert(FI != INT_MAX); 2527 if (!MFI->isFixedObjectIndex(FI)) 2528 return false; 2529 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2530} 2531 2532/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2533/// for tail call optimization. Targets which want to do tail call 2534/// optimization should implement this function. 
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                     CallingConv::ID CalleeCC,
                                     bool isVarArg,
                                     bool isCalleeStructRet,
                                     bool isCallerStructRet,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SelectionDAG& DAG) const {
  if (!IsTailCallConvention(CalleeCC) &&
      CalleeCC != CallingConv::C)
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  if (GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  if (RegInfo->needsStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // An stdcall caller is expected to clean up its arguments; the callee
  // isn't going to do that.
  if (!CCMatch && CallerCC == CallingConv::X86_StdCall)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  if (isVarArg && !Outs.empty()) {

    // Optimizing for varargs on Win64 is unlikely to be safe without
    // additional testing.
    if (Subtarget->isTargetWin64())
      return false;

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // If the call result is in ST0 / ST1, it needs to be popped off the x87
  // stack. Therefore, if the result is not used, it is not safe to optimize
  // this into a sibcall.
  bool Unused = false;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (!Ins[i].Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
                   getTargetMachine(), RVLocs, *DAG.getContext());
    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
        return false;
    }
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    // Allocate shadow area for Win64
    if (Subtarget->isTargetWin64()) {
      CCInfo.AllocateStack(32, 8);
    }

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();
      if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
        return false;

      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII =
        ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. In 32-bit, the call address can
    // only target EAX, EDX, or ECX since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if (!Subtarget->is64Bit() &&
        !isa<GlobalAddressSDNode>(Callee) &&
        !isa<ExternalSymbolSDNode>(Callee)) {
      unsigned NumInRegs = 0;
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        unsigned Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == 3)
            return false;
          break;
        }
      }
    }
  }

  return true;
}

FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return X86::createFastISel(funcInfo);
}


//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

static bool MayFoldLoad(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}

static bool MayFoldIntoStore(SDValue Op) {
  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}

static bool isTargetShuffle(unsigned Opcode) {
  switch(Opcode) {
  default: return false;
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::SHUFPD:
  case X86ISD::PALIGN:
  case X86ISD::SHUFPS:
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::VUNPCKLPSY:
  case X86ISD::VUNPCKLPDY:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::VUNPCKHPSY:
  case X86ISD::VUNPCKHPDY:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
  case X86ISD::VPERMILPS:
  case X86ISD::VPERMILPSY:
  case X86ISD::VPERMILPD:
  case X86ISD::VPERMILPDY:
  case X86ISD::VPERM2F128:
    return true;
  }
  return false;
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVSHDUP:
  case X86ISD::MOVSLDUP:
  case X86ISD::MOVDDUP:
    return DAG.getNode(Opc, dl, VT, V1);
  }

  return SDValue();
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                          SDValue V1, unsigned TargetMask, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::VPERMILPS:
  case X86ISD::VPERMILPSY:
  case X86ISD::VPERMILPD:
  case X86ISD::VPERMILPDY:
    return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
  }

  return SDValue();
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
               SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::PALIGN:
  case X86ISD::SHUFPD:
  case X86ISD::SHUFPS:
  case X86ISD::VPERM2F128:
    return DAG.getNode(Opc, dl, VT, V1, V2,
                       DAG.getConstant(TargetMask, MVT::i8));
  }
  return SDValue();
}

static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
  switch(Opc) {
  default: llvm_unreachable("Unknown x86 shuffle node");
  case X86ISD::MOVLHPS:
  case X86ISD::MOVLHPD:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLPS:
  case X86ISD::MOVLPD:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::VUNPCKLPSY:
  case X86ISD::VUNPCKLPDY:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::VUNPCKHPSY:
  case X86ISD::VUNPCKHPDY:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
    return DAG.getNode(Opc, dl, VT, V1, V2);
  }
  return SDValue();
}

SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  int ReturnAddrIndex = FuncInfo->getRAIndex();

  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    uint64_t SlotSize = TD->getPointerSize();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
                                                           false);
    FuncInfo->setRAIndex(ReturnAddrIndex);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
}


bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                       bool hasSymbolicDisplacement) {
  // Offset should fit into a 32-bit immediate field.
  if (!isInt<32>(Offset))
    return false;

  // If we don't have a symbolic displacement - we don't have any extra
  // restrictions.
  if (!hasSymbolicDisplacement)
    return true;

  // FIXME: Some tweaks might be needed for medium code model.
  if (M != CodeModel::Small && M != CodeModel::Kernel)
    return false;

  // For the small code model we assume that the last object is 16MB before
  // the end of the 31-bit boundary. We may also accept pretty large negative
  // constants knowing that all objects are in the positive half of the
  // address space.
  if (M == CodeModel::Small && Offset < 16*1024*1024)
    return true;

  // For the kernel code model we know that all objects reside in the negative
  // half of the 32-bit address space. We may not accept negative offsets,
  // since they may be just off, but we may accept pretty large positive ones.
  if (M == CodeModel::Kernel && Offset > 0)
    return true;

  return false;
}

/// isCalleePop - Determines whether the callee is required to pop its
/// own arguments. Callee pop is necessary to support tail calls.
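/// For example, a 32-bit stdcall function taking two i32 arguments returns
/// with "ret 8", popping its own 8 bytes of argument space.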
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool TailCallOpt) {
  if (IsVarArg)
    return false;

  switch (CallingConv) {
  default:
    return false;
  case CallingConv::X86_StdCall:
    return !is64Bit;
  case CallingConv::X86_FastCall:
    return !is64Bit;
  case CallingConv::X86_ThisCall:
    return !is64Bit;
  case CallingConv::Fast:
    return TailCallOpt;
  case CallingConv::GHC:
    return TailCallOpt;
  }
}

/// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code, returning the condition code and the LHS/RHS
/// of the comparison to make.
static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                               SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
  if (!isFP) {
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1   -> X == 0, jump !sign.
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_NS;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0   -> X == 0, jump on sign.
        return X86::COND_S;
      } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
        // X < 1   -> X <= 0
        RHS = DAG.getConstant(0, RHS.getValueType());
        return X86::COND_LE;
      }
    }

    switch (SetCCOpcode) {
    default: llvm_unreachable("Invalid integer condition!");
    case ISD::SETEQ:  return X86::COND_E;
    case ISD::SETGT:  return X86::COND_G;
    case ISD::SETGE:  return X86::COND_GE;
    case ISD::SETLT:  return X86::COND_L;
    case ISD::SETLE:  return X86::COND_LE;
    case ISD::SETNE:  return X86::COND_NE;
    case ISD::SETULT: return X86::COND_B;
    case ISD::SETUGT: return X86::COND_A;
    case ISD::SETULE: return X86::COND_BE;
    case ISD::SETUGE: return X86::COND_AE;
    }
  }

  // First determine if it is required or is profitable to flip the operands.

  // If LHS is a foldable load, but RHS is not, flip the condition.
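  // For example, (setolt (load X), Y) becomes (setogt Y, (load X)) here, so
  // that the load ends up as the RHS, where it can be folded into the memory
  // operand of the eventual compare.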
  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
      !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:  return X86::COND_E;
  case ISD::SETOLT:              // flipped
  case ISD::SETOGT:
  case ISD::SETGT:  return X86::COND_A;
  case ISD::SETOLE:              // flipped
  case ISD::SETOGE:
  case ISD::SETGE:  return X86::COND_AE;
  case ISD::SETUGT:              // flipped
  case ISD::SETULT:
  case ISD::SETLT:  return X86::COND_B;
  case ISD::SETUGE:              // flipped
  case ISD::SETULE:
  case ISD::SETLE:  return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:  return X86::COND_NE;
  case ISD::SETUO:  return X86::COND_P;
  case ISD::SETO:   return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE: return X86::COND_INVALID;
  }
}

/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}

/// isUndefOrInRange - Return true if Val is undef or if its value falls within
/// the specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// isUndefOrInRange - Return true if every element in Mask, beginning
/// at position Pos and ending at Pos+Size, is undef or falls within the
/// specified range [Low, Hi).
static bool isUndefOrInRange(const SmallVectorImpl<int> &Mask,
                             int Pos, int Size, int Low, int Hi) {
  for (int i = Pos, e = Pos+Size; i != e; ++i)
    if (!isUndefOrInRange(Mask[i], Low, Hi))
      return false;
  return true;
}

/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  if (Val < 0 || Val == CmpVal)
    return true;
  return false;
}

/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
/// at position Pos and ending at Pos+Size, is undef or equals the
/// corresponding value of the sequence starting at Low.
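/// For example, Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and Low = 4
/// returns true: the -1 element is undef and the rest count up from 4.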
static bool isSequentialOrUndefInRange(const SmallVectorImpl<int> &Mask,
                                       int Pos, int Size, int Low) {
  for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}

/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW.  That is, it doesn't reference
/// the second operand.
static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT == MVT::v4f32 || VT == MVT::v4i32 )
    return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return (Mask[0] < 2 && Mask[1] < 2);
  return false;
}

bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFDMask(M, N->getValueType(0));
}

/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Lower quadword copied in order or undef.
  for (int i = 0; i != 4; ++i)
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;

  // Upper quadword shuffled.
  for (int i = 4; i != 8; ++i)
    if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
      return false;

  return true;
}

bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFHWMask(M, N->getValueType(0));
}

/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Upper quadword copied in order.
  for (int i = 4; i != 8; ++i)
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;

  // Lower quadword shuffled.
  for (int i = 0; i != 4; ++i)
    if (Mask[i] >= 4)
      return false;

  return true;
}

bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFLWMask(M, N->getValueType(0));
}

/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PALIGNR.
static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool hasSSSE3) {
  int i, e = VT.getVectorNumElements();
  if (VT.getSizeInBits() != 128 && VT.getSizeInBits() != 64)
    return false;

  // Do not handle v2i64 / v2f64 shuffles with palignr.
  if (e < 4 || !hasSSSE3)
    return false;

  for (i = 0; i != e; ++i)
    if (Mask[i] >= 0)
      break;

  // All undef, not a palignr.
  if (i == e)
    return false;

  // Make sure we're shifting in the right direction.
  if (Mask[i] <= i)
    return false;

  int s = Mask[i] - i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != e; ++i) {
    int m = Mask[i];
    if (m >= 0 && m != s+i)
      return false;
  }
  return true;
}

/// isVSHUFPSYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 256-bit
/// VSHUFPSY.
static bool isVSHUFPSYMask(const SmallVectorImpl<int> &Mask, EVT VT,
                           const X86Subtarget *Subtarget) {
  int NumElems = VT.getVectorNumElements();

  if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
    return false;

  if (NumElems != 8)
    return false;

  // VSHUFPSY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
  //  SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
  //
  //  DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
  //           Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
  //
  int QuarterSize = NumElems/4;
  int HalfSize = QuarterSize*2;
  for (int i = 0; i < QuarterSize; ++i)
    if (!isUndefOrInRange(Mask[i], 0, HalfSize))
      return false;
  for (int i = QuarterSize; i < QuarterSize*2; ++i)
    if (!isUndefOrInRange(Mask[i], NumElems, NumElems+HalfSize))
      return false;

  // The mask of the second half must be the same as the first but with
  // the appropriate offsets. This works in the same way as VPERMILPS
  // works with masks.
  for (int i = QuarterSize*2; i < QuarterSize*3; ++i) {
    if (!isUndefOrInRange(Mask[i], HalfSize, NumElems))
      return false;
    int FstHalfIdx = i-HalfSize;
    if (Mask[FstHalfIdx] < 0)
      continue;
    if (!isUndefOrEqual(Mask[i], Mask[FstHalfIdx]+HalfSize))
      return false;
  }
  for (int i = QuarterSize*3; i < NumElems; ++i) {
    if (!isUndefOrInRange(Mask[i], NumElems+HalfSize, NumElems*2))
      return false;
    int FstHalfIdx = i-HalfSize;
    if (Mask[FstHalfIdx] < 0)
      continue;
    if (!isUndefOrEqual(Mask[i], Mask[FstHalfIdx]+HalfSize))
      return false;
  }

  return true;
}

/// getShuffleVSHUFPSYImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the VSHUFPSY instruction.
static unsigned getShuffleVSHUFPSYImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VT = SVOp->getValueType(0);
  int NumElems = VT.getVectorNumElements();

  assert(NumElems == 8 && VT.getSizeInBits() == 256 &&
         "Only supports v8i32 and v8f32 types");

  int HalfSize = NumElems/2;
  unsigned Mask = 0;
  for (int i = 0; i != NumElems; ++i) {
    if (SVOp->getMaskElt(i) < 0)
      continue;
    // The mask of the first half must be equal to the second one.
    unsigned Shamt = (i%HalfSize)*2;
    unsigned Elt = SVOp->getMaskElt(i) % HalfSize;
    Mask |= Elt << Shamt;
  }

  return Mask;
}

/// isVSHUFPDYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 256-bit
/// VSHUFPDY. This shuffle doesn't have the same restriction as the PS
/// version: the mask of the second half isn't bound to the first one.
static bool isVSHUFPDYMask(const SmallVectorImpl<int> &Mask, EVT VT,
                           const X86Subtarget *Subtarget) {
  int NumElems = VT.getVectorNumElements();

  if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
    return false;

  if (NumElems != 4)
    return false;

  // VSHUFPDY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>      X3       X2       X1       X0
  //  SRC2 =>      Y3       Y2       Y1       Y0
  //
  //  DST  =>  Y2..Y3,  X2..X3,  Y1..Y0,  X1..X0
  //
  int QuarterSize = NumElems/4;
  int HalfSize = QuarterSize*2;
  for (int i = 0; i < QuarterSize; ++i)
    if (!isUndefOrInRange(Mask[i], 0, HalfSize))
      return false;
  for (int i = QuarterSize; i < QuarterSize*2; ++i)
    if (!isUndefOrInRange(Mask[i], NumElems, NumElems+HalfSize))
      return false;
  for (int i = QuarterSize*2; i < QuarterSize*3; ++i)
    if (!isUndefOrInRange(Mask[i], HalfSize, NumElems))
      return false;
  for (int i = QuarterSize*3; i < NumElems; ++i)
    if (!isUndefOrInRange(Mask[i], NumElems+HalfSize, NumElems*2))
      return false;

  return true;
}

/// getShuffleVSHUFPDYImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the VSHUFPDY instruction.
static unsigned getShuffleVSHUFPDYImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VT = SVOp->getValueType(0);
  int NumElems = VT.getVectorNumElements();

  assert(NumElems == 4 && VT.getSizeInBits() == 256 &&
         "Only supports v4i64 and v4f64 types");

  int HalfSize = NumElems/2;
  unsigned Mask = 0;
  for (int i = 0; i != NumElems; ++i) {
    if (SVOp->getMaskElt(i) < 0)
      continue;
    int Elt = SVOp->getMaskElt(i) % HalfSize;
    Mask |= Elt << i;
  }

  return Mask;
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 128-bit
/// SHUFPS and SHUFPD.
static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  int NumElems = VT.getVectorNumElements();

  if (VT.getSizeInBits() != 128)
    return false;

  if (NumElems != 2 && NumElems != 4)
    return false;

  int Half = NumElems / 2;
  for (int i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Mask[i], 0, NumElems))
      return false;
  for (int i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
      return false;

  return true;
}

bool X86::isSHUFPMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isSHUFPMask(M, N->getValueType(0));
}

/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  int NumElems = VT.getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;

  int Half = NumElems / 2;
  for (int i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
      return false;
  for (int i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Mask[i], 0, NumElems))
      return false;
  return true;
}

static bool isCommutedSHUFP(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return isCommutedSHUFPMask(M, N->getValueType(0));
}

/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (VT.getSizeInBits() != 128)
    return false;

  if (NumElems != 4)
    return false;

  // Expect elt0 == 6, elt1 == 7, elt2 == 2, elt3 == 3
  return isUndefOrEqual(N->getMaskElt(0), 6) &&
         isUndefOrEqual(N->getMaskElt(1), 7) &&
         isUndefOrEqual(N->getMaskElt(2), 2) &&
         isUndefOrEqual(N->getMaskElt(3), 3);
}

/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (VT.getSizeInBits() != 128)
    return false;

  if (NumElems != 4)
    return false;

  return isUndefOrEqual(N->getMaskElt(0), 2) &&
         isUndefOrEqual(N->getMaskElt(1), 3) &&
         isUndefOrEqual(N->getMaskElt(2), 2) &&
         isUndefOrEqual(N->getMaskElt(3), 3);
}

/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(ShuffleVectorSDNode *N) {
  unsigned NumElems = N->getValueType(0).getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i))
      return false;

  return true;
}

/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) {
  unsigned NumElems = N->getValueType(0).getVectorNumElements();

  if ((NumElems != 2 && NumElems != 4)
      || N->getValueType(0).getSizeInBits() > 128)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems))
      return false;

  return true;
}

/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                         bool V2IsSplat = false) {
  int NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckl");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
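  // For example, the canonical v4i32 UNPCKL mask is <0, 4, 1, 5>; with AVX a
  // v8i32 mask must interleave within each lane, e.g. <0, 8, 1, 9, 4, 12, 5, 13>.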
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  unsigned Start = 0;
  unsigned End = NumLaneElts;
  for (unsigned s = 0; s < NumLanes; ++s) {
    for (unsigned i = Start, j = s * NumLaneElts;
         i != End;
         i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];
      if (!isUndefOrEqual(BitI, j))
        return false;
      if (V2IsSplat) {
        if (!isUndefOrEqual(BitI1, NumElts))
          return false;
      } else {
        if (!isUndefOrEqual(BitI1, j + NumElts))
          return false;
      }
    }
    // Process the next 128 bits.
    Start += NumLaneElts;
    End += NumLaneElts;
  }

  return true;
}

bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat);
}

/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
                         bool V2IsSplat = false) {
  int NumElts = VT.getVectorNumElements();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for unpckh");

  if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts/NumLanes;

  unsigned Start = 0;
  unsigned End = NumLaneElts;
  for (unsigned l = 0; l != NumLanes; ++l) {
    for (unsigned i = Start, j = (l*NumLaneElts)+NumLaneElts/2;
         i != End; i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];
      if (!isUndefOrEqual(BitI, j))
        return false;
      if (V2IsSplat) {
        if (!isUndefOrEqual(BitI1, NumElts))
          return false;
      } else {
        if (!isUndefOrEqual(BitI1, j+NumElts))
          return false;
      }
    }
    // Process the next 128 bits.
    Start += NumLaneElts;
    End += NumLaneElts;
  }
  return true;
}

bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat);
}

/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
  int NumElems = VT.getVectorNumElements();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
  // independently on 128-bit lanes.
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumLaneElts = NumElems / NumLanes;

  for (unsigned s = 0; s < NumLanes; ++s) {
    for (unsigned i = s * NumLaneElts, j = s * NumLaneElts;
         i != NumLaneElts * (s + 1);
         i += 2, ++j) {
      int BitI  = Mask[i];
      int BitI1 = Mask[i+1];

      if (!isUndefOrEqual(BitI, j))
        return false;
      if (!isUndefOrEqual(BitI1, j))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0));
}

/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
  int NumElems = VT.getVectorNumElements();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
    int BitI  = Mask[i];
    int BitI1 = Mask[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }
  return true;
}

bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0));
}

/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT.getVectorElementType().getSizeInBits() < 32)
    return false;

  int NumElts = VT.getVectorNumElements();

  if (!isUndefOrEqual(Mask[0], NumElts))
    return false;

  for (int i = 1; i < NumElts; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;

  return true;
}

bool X86::isMOVLMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isMOVLMask(M, N->getValueType(0));
}

/// isVPERM2F128Mask - Match 256-bit shuffles where the elements are considered
/// as permutations between 128-bit chunks or halves. As an example, the
/// shuffle below:
///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
/// takes its first half from the second half of V1 and its second half from
/// the second half of V2.
static bool isVPERM2F128Mask(const SmallVectorImpl<int> &Mask, EVT VT,
                             const X86Subtarget *Subtarget) {
  if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
    return false;

  // The shuffle result is divided into half A and half B. In total the two
  // sources have 4 halves, namely: C, D, E, F. The final values of A and
  // B must come from C, D, E or F.
  int HalfSize = VT.getVectorNumElements()/2;
  bool MatchA = false, MatchB = false;

  // Check if A comes from one of C, D, E, F.
  for (int Half = 0; Half < 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
      MatchA = true;
      break;
    }
  }

  // Check if B comes from one of C, D, E, F.
  for (int Half = 0; Half < 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
      MatchB = true;
      break;
    }
  }

  return MatchA && MatchB;
}

/// getShuffleVPERM2F128Immediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERM2F128 instructions.
static unsigned getShuffleVPERM2F128Immediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VT = SVOp->getValueType(0);

  int HalfSize = VT.getVectorNumElements()/2;

  int FstHalf = 0, SndHalf = 0;
  for (int i = 0; i < HalfSize; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      FstHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }
  for (int i = HalfSize; i < HalfSize*2; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      SndHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }

  return (FstHalf | (SndHalf << 4));
}

/// isVPERMILPDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching differs depending on whether the underlying
/// element type is 32- or 64-bit. In VPERMILPS the high half of the mask
/// should select the same elements as the low half, but from the upper half of
/// the source. In VPERMILPD the two lanes can be shuffled independently of
/// each other, with the same restriction that lanes can't be crossed.
static bool isVPERMILPDMask(const SmallVectorImpl<int> &Mask, EVT VT,
                            const X86Subtarget *Subtarget) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits()/128;

  if (!Subtarget->hasAVX())
    return false;

  // Match any permutation of a 128-bit vector with 64-bit element types.
  if (NumLanes == 1 && NumElts != 2)
    return false;

  // Only match 256-bit with 64-bit element types.
  if (VT.getSizeInBits() == 256 && NumElts != 4)
    return false;

  // The mask on the high lane is independent of the low. Both can match
  // any element within its own lane, but can't cross.
  int LaneSize = NumElts/NumLanes;
  for (int l = 0; l < NumLanes; ++l)
    for (int i = l*LaneSize; i < LaneSize*(l+1); ++i) {
      int LaneStart = l*LaneSize;
      if (!isUndefOrInRange(Mask[i], LaneStart, LaneStart+LaneSize))
        return false;
    }

  return true;
}

/// isVPERMILPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPS*.
/// Note that VPERMIL mask matching differs depending on whether the underlying
/// element type is 32- or 64-bit. In VPERMILPS the high half of the mask
/// should select the same elements as the low half, but from the upper half of
/// the source. In VPERMILPD the two lanes can be shuffled independently of
/// each other, with the same restriction that lanes can't be crossed.
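/// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> is a valid VPERMILPS
/// mask: each high-lane element mirrors the corresponding low-lane element,
/// offset by the lane size of 4.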
static bool isVPERMILPSMask(const SmallVectorImpl<int> &Mask, EVT VT,
                            const X86Subtarget *Subtarget) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;

  if (!Subtarget->hasAVX())
    return false;

  // Match any permutation of a 128-bit vector with 32-bit element types.
  if (NumLanes == 1 && NumElts != 4)
    return false;

  // Only match 256-bit with 32-bit element types.
  if (VT.getSizeInBits() == 256 && NumElts != 8)
    return false;

  // The mask on the high lane should be the same as the low. Actually,
  // they can differ if any of the corresponding indices in a lane is undef
  // and the other stays in range.
  int LaneSize = NumElts/NumLanes;
  for (int i = 0; i < LaneSize; ++i) {
    int HighElt = i+LaneSize;
    bool HighValid = isUndefOrInRange(Mask[HighElt], LaneSize, NumElts);
    bool LowValid = isUndefOrInRange(Mask[i], 0, LaneSize);

    if (!HighValid || !LowValid)
      return false;
    if (Mask[i] < 0 || Mask[HighElt] < 0)
      continue;
    if (Mask[HighElt]-Mask[i] != LaneSize)
      return false;
  }

  return true;
}

/// getShuffleVPERMILPSImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERMILPS* instructions.
static unsigned getShuffleVPERMILPSImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VT = SVOp->getValueType(0);

  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits()/128;
  int LaneSize = NumElts/NumLanes;

  // Although the mask is required to be equal for both lanes, process both
  // anyway to catch the cases where a mask matches because the same mask
  // element is undef on the first half but valid on the second. This would
  // get pathological cases such as: shuffle <u, 0, 1, 2, 4, 4, 5, 6>, which
  // is completely valid.
  unsigned Mask = 0;
  for (int l = 0; l < NumLanes; ++l) {
    for (int i = 0; i < LaneSize; ++i) {
      int MaskElt = SVOp->getMaskElt(i+(l*LaneSize));
      if (MaskElt < 0)
        continue;
      if (MaskElt >= LaneSize)
        MaskElt -= LaneSize;
      Mask |= MaskElt << (i*2);
    }
  }

  return Mask;
}

/// getShuffleVPERMILPDImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERMILPD* instructions.
static unsigned getShuffleVPERMILPDImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VT = SVOp->getValueType(0);

  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits()/128;

  unsigned Mask = 0;
  int LaneSize = NumElts/NumLanes;
  for (int l = 0; l < NumLanes; ++l)
    for (int i = l*LaneSize; i < LaneSize*(l+1); ++i) {
      int MaskElt = SVOp->getMaskElt(i);
      if (MaskElt < 0)
        continue;
      Mask |= (MaskElt-l*LaneSize) << i;
    }

  return Mask;
}

/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
/// x86 movss wants: x86 movs require the lowest element to be the lowest
/// element of vector 2 and the other elements to come from vector 1 in order.
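/// For example, the commuted v4i32 mask <0, 5, 6, 7> matches: element 0 is
/// V1's lowest element (mask value 0), and elements 1-3 come from V2 in order
/// (mask values 5, 6, 7, i.e. V2 elements 1-3).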
static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                               bool V2IsSplat = false, bool V2IsUndef = false) {
  int NumOps = VT.getVectorNumElements();
  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
    return false;

  if (!isUndefOrEqual(Mask[0], 0))
    return false;

  for (int i = 1; i < NumOps; ++i)
    if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
          (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
          (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
      return false;

  return true;
}

static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false,
                           bool V2IsUndef = false) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef);
}

/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N,
                         const X86Subtarget *Subtarget) {
  if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
    return false;

  // The second vector must be undef
  if (N->getOperand(1).getOpcode() != ISD::UNDEF)
    return false;

  EVT VT = N->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
      (VT.getSizeInBits() == 256 && NumElems != 8))
    return false;

  // "i+1" is the value the indexed mask element must have
  for (unsigned i = 0; i < NumElems; i += 2)
    if (!isUndefOrEqual(N->getMaskElt(i), i+1) ||
        !isUndefOrEqual(N->getMaskElt(i+1), i+1))
      return false;

  return true;
}

/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N,
                         const X86Subtarget *Subtarget) {
  if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
    return false;

  // The second vector must be undef
  if (N->getOperand(1).getOpcode() != ISD::UNDEF)
    return false;

  EVT VT = N->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
      (VT.getSizeInBits() == 256 && NumElems != 8))
    return false;

  // "i" is the value the indexed mask element must have
  for (unsigned i = 0; i < NumElems; i += 2)
    if (!isUndefOrEqual(N->getMaskElt(i), i) ||
        !isUndefOrEqual(N->getMaskElt(i+1), i))
      return false;

  return true;
}

/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
  int e = N->getValueType(0).getVectorNumElements() / 2;

  for (int i = 0; i < e; ++i)
    if (!isUndefOrEqual(N->getMaskElt(i), i))
      return false;
  for (int i = 0; i < e; ++i)
    if (!isUndefOrEqual(N->getMaskElt(e+i), i))
      return false;
  return true;
}

/// isVEXTRACTF128Index - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
/// suitable for input to VEXTRACTF128.
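/// For example, extracting a v4f32 from a v8f32 at element index 4 is
/// suitable (4 * 32 = 128 bits, i.e. lane-aligned), while index 2 is not.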
bool X86::isVEXTRACTF128Index(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
    return false;

  // The index should be aligned on a 128-bit boundary.
  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  unsigned VL = N->getValueType(0).getVectorNumElements();
  unsigned VBits = N->getValueType(0).getSizeInBits();
  unsigned ElSize = VBits / VL;
  bool Result = (Index * ElSize) % 128 == 0;

  return Result;
}

/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR
/// operand specifies a subvector insert that is suitable for input to
/// VINSERTF128.
bool X86::isVINSERTF128Index(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
    return false;

  // The index should be aligned on a 128-bit boundary.
  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  unsigned VL = N->getValueType(0).getVectorNumElements();
  unsigned VBits = N->getValueType(0).getSizeInBits();
  unsigned ElSize = VBits / VL;
  bool Result = (Index * ElSize) % 128 == 0;

  return Result;
}

/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  int NumOperands = SVOp->getValueType(0).getVectorNumElements();

  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (int i = 0; i < NumOperands; ++i) {
    int Val = SVOp->getMaskElt(NumOperands-i-1);
    if (Val < 0) Val = 0;
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }
  return Mask;
}

/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    int Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }
  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    int Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }
  return Mask;
}

/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
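/// For example, for the v4i32 mask <1, 2, 3, 4> the first defined mask element
/// is 1 at position 0, so the returned byte immediate is (1 - 0) * 4 = 4.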
unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  EVT VVT = N->getValueType(0);
  unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3;
  int Val = 0;

  unsigned i, e;
  for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) {
    Val = SVOp->getMaskElt(i);
    if (Val >= 0)
      break;
  }
  assert(Val - i > 0 && "PALIGNR imm should be positive");
  return (Val - i) * EltSize;
}

/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate
/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
/// instructions.
unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
    llvm_unreachable("Illegal extract subvector for VEXTRACTF128");

  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();

  EVT VecVT = N->getOperand(0).getValueType();
  EVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}

/// getInsertVINSERTF128Immediate - Return the appropriate immediate
/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
/// instructions.
unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) {
  if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
    llvm_unreachable("Illegal insert subvector for VINSERTF128");

  uint64_t Index =
    cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();

  EVT VecVT = N->getValueType(0);
  EVT ElVT = VecVT.getVectorElementType();

  unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits();
  return Index / NumElemsPerChunk;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->isNullValue()) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in
/// their permute mask.
static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
                                    SelectionDAG &DAG) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    int idx = SVOp->getMaskElt(i);
    if (idx < 0)
      MaskVec.push_back(idx);
    else if (idx < (int)NumElems)
      MaskVec.push_back(idx + NumElems);
    else
      MaskVec.push_back(idx - NumElems);
  }
  return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1),
                              SVOp->getOperand(0), &MaskVec[0]);
}

/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
  unsigned NumElems = VT.getVectorNumElements();
  for (unsigned i = 0; i != NumElems; ++i) {
    int idx = Mask[i];
    if (idx < 0)
      continue;
    else if (idx < (int)NumElems)
      Mask[i] = idx + NumElems;
    else
      Mask[i] = idx - NumElems;
  }
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps.
/// The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) {
  EVT VT = Op->getValueType(0);
  if (VT.getSizeInBits() != 128)
    return false;
  if (VT.getVectorNumElements() != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i+4))
      return false;
  return true;
}

/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector. It also returns the LoadSDNode by reference if
/// required.
static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
  if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
    return false;
  N = N->getOperand(0).getNode();
  if (!ISD::isNON_EXTLoad(N))
    return false;
  if (LD)
    *LD = cast<LoadSDNode>(N);
  return true;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half
/// of V1 (and in order), and the upper half elements should come from the
/// upper half of V2 (and in order). And since V1 will become the source of
/// the MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
                               ShuffleVectorSDNode *Op) {
  EVT VT = Op->getValueType(0);
  if (VT.getSizeInBits() != 128)
    return false;

  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // a load-folding shufps op instead.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = VT.getVectorNumElements();

  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
      return false;
  return true;
}

/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDValue SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDValue SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode
static bool isZeroShuffle(ShuffleVectorSDNode *N) {
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned NumElems = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = N->getMaskElt(i);
    if (Idx >= (int)NumElems) {
      unsigned Opc = V2.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
        return false;
    } else if (Idx >= 0) {
      unsigned Opc = V1.getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !X86::isZeroNode(V1.getOperand(Idx)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of the specified type with all zero
/// elements.
static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
                             DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build SSE zero vectors as <4 x i32> bitcasted
  // to their dest type. This ensures they get CSE'd.
  SDValue Vec;
  if (VT.getSizeInBits() == 128) {  // SSE
    if (HasSSE2) {  // SSE2
      SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
    } else { // SSE1
      SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
      Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
    }
  } else if (VT.getSizeInBits() == 256) { // AVX
    // 256-bit logic and arithmetic instructions in AVX are
    // all floating-point, no support for integer ops. Default
    // to emitting fp zeroed vectors then.
    SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
    SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
  }
  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of the specified type with all bits set.
/// Always build ones vectors as <4 x i32>. For 256-bit types, use two
/// <4 x i32> inserted into a <8 x i32> appropriately. Then bitcast to the
/// original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  assert((VT.is128BitVector() || VT.is256BitVector())
         && "Expected a 128-bit or 256-bit vector type");

  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                            Cst, Cst, Cst, Cst);

  if (VT.is256BitVector()) {
    SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32),
                              Vec, DAG.getConstant(0, MVT::i32), DAG, dl);
    Vec = Insert128BitVector(InsV, Vec,
                DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl);
  }

  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
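// Illustrative note: because every 128-bit zero vector is materialized
// through the same <4 x i32> BUILD_VECTOR and then bitcast, a v16i8 zero and
// a v2i64 zero requested in the same function share that node after CSE and
// later match a single pxor (or xorps on SSE1). The all-ones <4 x i32>
// similarly maps onto pcmpeqd of a register with itself.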
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  bool Changed = false;
  SmallVector<int, 8> MaskVec;
  SVOp->getMask(MaskVec);

  for (unsigned i = 0; i != NumElems; ++i) {
    if (MaskVec[i] > (int)NumElems) {
      MaskVec[i] = NumElems;
      Changed = true;
    }
  }
  if (Changed)
    return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0),
                                SVOp->getOperand(1), &MaskVec[0]);
  return SDValue(SVOp, 0);
}

/// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
/// operation of the specified width.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}

/// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                          SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    Mask.push_back(i);
    Mask.push_back(i + NumElems);
  }
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}

/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                          SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  unsigned Half = NumElems/2;
  SmallVector<int, 8> Mask;
  for (unsigned i = 0; i != Half; ++i) {
    Mask.push_back(i + Half);
    Mask.push_back(i + NumElems + Half);
  }
  return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}
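// Worked example (illustrative): for v4f32, getMOVL builds the mask
// <4, 1, 2, 3> (low element from V2, the rest from V1), getUnpackl builds
// <0, 4, 1, 5>, and getUnpackh builds <2, 6, 3, 7>, interleaving the low and
// high halves of the two operands respectively.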
// PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
// a generic shuffle instruction because the target has no such instructions.
// Generate shuffles which repeat i16 and i8 several times until they can be
// represented by v4f32 and then be manipulated by target supported shuffles.
static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
  EVT VT = V.getValueType();
  int NumElems = VT.getVectorNumElements();
  DebugLoc dl = V.getDebugLoc();

  while (NumElems > 4) {
    if (EltNo < NumElems/2) {
      V = getUnpackl(DAG, dl, VT, V, V);
    } else {
      V = getUnpackh(DAG, dl, VT, V, V);
      EltNo -= NumElems/2;
    }
    NumElems >>= 1;
  }
  return V;
}

/// getLegalSplat - Generate a legal splat with supported x86 shuffles
static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
  EVT VT = V.getValueType();
  DebugLoc dl = V.getDebugLoc();
  assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256)
         && "Vector size not supported");

  if (VT.getSizeInBits() == 128) {
    V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
    int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
    V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
                             &SplatMask[0]);
  } else {
    // To use VPERMILPS to splat scalars, the second half of indices must
    // refer to the higher part, which is a duplication of the lower one,
    // because VPERMILPS can only handle in-lane permutations.
    int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
                         EltNo+4, EltNo+4, EltNo+4, EltNo+4 };

    V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
    V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
                             &SplatMask[0]);
  }

  return DAG.getNode(ISD::BITCAST, dl, VT, V);
}

/// PromoteSplat - Splat is promoted to target supported vector shuffles.
static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
  EVT SrcVT = SV->getValueType(0);
  SDValue V1 = SV->getOperand(0);
  DebugLoc dl = SV->getDebugLoc();

  int EltNo = SV->getSplatIndex();
  int NumElems = SrcVT.getVectorNumElements();
  unsigned Size = SrcVT.getSizeInBits();

  assert(((Size == 128 && NumElems > 4) || Size == 256) &&
         "Unknown how to promote splat for type");

  // Extract the 128-bit part containing the splat element and update
  // the splat element index when it refers to the higher register.
  // Note that element NumElems/2 itself already lives in the upper half,
  // so the comparison must be inclusive.
  if (Size == 256) {
    unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0;
    V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl);
    if (Idx > 0)
      EltNo -= NumElems/2;
  }

  // i16 and i8 vector types can't be used directly by a generic shuffle
  // instruction because the target has no such instruction. Generate shuffles
  // which repeat i16 and i8 several times until they fit in i32, and then can
  // be manipulated by target supported shuffles.
  EVT EltVT = SrcVT.getVectorElementType();
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    V1 = PromoteSplati8i16(V1, DAG, EltNo);

  // Recreate the 256-bit vector and place the same 128-bit vector
  // into the low and high part. This is necessary because we want
  // to use VPERM* to shuffle the vectors.
  if (Size == 256) {
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1,
                                      DAG.getConstant(0, MVT::i32), DAG, dl);
    V1 = Insert128BitVector(InsV, V1,
                            DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
  }

  return getLegalSplat(DAG, V1, EltNo);
}
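// Worked example (illustrative): splatting element 5 of a v8i16. Since
// 5 >= 8/2, PromoteSplati8i16 unpacks the high half with itself, so the
// word pair duplicating element 5 lands in 32-bit slot 1 and EltNo becomes
// 5 - 4 = 1; with only four 32-bit slots left, the loop stops and
// getLegalSplat finishes with a v4f32 shuffle mask <1, 1, 1, 1>.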
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                           bool isZero, bool HasSSE2,
                                           SelectionDAG &DAG) {
  EVT VT = V2.getValueType();
  SDValue V1 = isZero
    ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec.push_back(i == Idx ? NumElems : i);
  return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}

/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
                                   unsigned Depth) {
  if (Depth == 6)
    return SDValue();  // Limit search depth.

  SDValue V = SDValue(N, 0);
  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
    Index = SV->getMaskElt(Index);

    if (Index < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    int NumElems = VT.getVectorNumElements();
    SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1);
  }

  // Recurse into target specific vector shuffles to find scalars.
  if (isTargetShuffle(Opcode)) {
    int NumElems = VT.getVectorNumElements();
    SmallVector<unsigned, 16> ShuffleMask;
    SDValue ImmN;

    switch(Opcode) {
    case X86ISD::SHUFPS:
    case X86ISD::SHUFPD:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeSHUFPSMask(NumElems,
                       cast<ConstantSDNode>(ImmN)->getZExtValue(),
                       ShuffleMask);
      break;
    case X86ISD::PUNPCKHBW:
    case X86ISD::PUNPCKHWD:
    case X86ISD::PUNPCKHDQ:
    case X86ISD::PUNPCKHQDQ:
      DecodePUNPCKHMask(NumElems, ShuffleMask);
      break;
    case X86ISD::UNPCKHPS:
    case X86ISD::UNPCKHPD:
    case X86ISD::VUNPCKHPSY:
    case X86ISD::VUNPCKHPDY:
      DecodeUNPCKHPMask(NumElems, ShuffleMask);
      break;
    case X86ISD::PUNPCKLBW:
    case X86ISD::PUNPCKLWD:
    case X86ISD::PUNPCKLDQ:
    case X86ISD::PUNPCKLQDQ:
      DecodePUNPCKLMask(VT, ShuffleMask);
      break;
    case X86ISD::UNPCKLPS:
    case X86ISD::UNPCKLPD:
    case X86ISD::VUNPCKLPSY:
    case X86ISD::VUNPCKLPDY:
      DecodeUNPCKLPMask(VT, ShuffleMask);
      break;
    case X86ISD::MOVHLPS:
      DecodeMOVHLPSMask(NumElems, ShuffleMask);
      break;
    case X86ISD::MOVLHPS:
      DecodeMOVLHPSMask(NumElems, ShuffleMask);
      break;
    case X86ISD::PSHUFD:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodePSHUFMask(NumElems,
                      cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      ShuffleMask);
      break;
    case X86ISD::PSHUFHW:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
                        ShuffleMask);
      break;
    case X86ISD::PSHUFLW:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
                        ShuffleMask);
      break;
    case X86ISD::MOVSS:
    case X86ISD::MOVSD: {
      // The index 0 always comes from the first element of the second source,
      // this is why MOVSS and MOVSD are used in the first place. The other
      // elements come from the other positions of the first source vector.
      unsigned OpNum = (Index == 0) ? 1 : 0;
      return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG,
                                 Depth+1);
    }
    case X86ISD::VPERMILPS:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeVPERMILPSMask(4, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                          ShuffleMask);
      break;
    case X86ISD::VPERMILPSY:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeVPERMILPSMask(8, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                          ShuffleMask);
      break;
    case X86ISD::VPERMILPD:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeVPERMILPDMask(2, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                          ShuffleMask);
      break;
    case X86ISD::VPERMILPDY:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeVPERMILPDMask(4, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                          ShuffleMask);
      break;
    case X86ISD::VPERM2F128:
      ImmN = N->getOperand(N->getNumOperands()-1);
      DecodeVPERM2F128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                           ShuffleMask);
      break;
    default:
      // A bare string literal in assert() is always true; make the
      // assertion actually fire for unhandled target shuffles.
      assert(0 && "not implemented for target shuffle node");
      return SDValue();
    }

    Index = ShuffleMask[Index];
    if (Index < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG,
                               Depth+1);
  }

  // Actual nodes that may contain scalar elements
  if (Opcode == ISD::BITCAST) {
    V = V.getOperand(0);
    EVT SrcVT = V.getValueType();
    unsigned NumElems = VT.getVectorNumElements();

    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
      return SDValue();
  }

  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return (Index == 0) ? V.getOperand(0)
                        : DAG.getUNDEF(VT.getVectorElementType());

  if (V.getOpcode() == ISD::BUILD_VECTOR)
    return V.getOperand(Index);

  return SDValue();
}

/// getNumOfConsecutiveZeros - Return the number of elements of a vector
/// shuffle operation which are consecutively zero. The search can start in
/// two different directions, from the left or from the right.
static
unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,
                                  bool ZerosFromLeft, SelectionDAG &DAG) {
  int i = 0;

  while (i < NumElems) {
    unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
    SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0);
    if (!(Elt.getNode() &&
          (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
      break;
    ++i;
  }

  return i;
}
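// Worked example (illustrative): for a v4i32 shuffle of V1 with a zero vector
// and the mask <4, 4, 0, 1> (elements 4..7 name the zero operand), searching
// from the left counts two consecutive zero elements, while searching from
// the right counts none, since element 3 resolves to V1[1].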
/// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to
/// MaskE correspond consecutively to elements from one of the vector operands,
/// starting from its index OpIdx. Also tell OpNum which source vector operand.
static
bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE,
                              int OpIdx, int NumElems, unsigned &OpNum) {
  bool SeenV1 = false;
  bool SeenV2 = false;

  for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) {
    int Idx = SVOp->getMaskElt(i);
    // Ignore undef indices
    if (Idx < 0)
      continue;

    if (Idx < NumElems)
      SeenV1 = true;
    else
      SeenV2 = true;

    // Only accept consecutive elements from the same vector
    if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
      return false;
  }

  OpNum = SeenV1 ? 0 : 1;
  return true;
}

/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
/// logical right shift of a vector.
static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                               bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
                                          false /* check zeros from right */,
                                          DAG);
  unsigned OpSrc;

  if (!NumZeros)
    return false;

  // Considering the elements in the mask that are not consecutive zeros,
  // check if they consecutively come from only one of the source vectors.
  //
  //               V1 = {X, A, B, C}     0
  //                      \  \  \       /
  //   vector_shuffle V1, V2 <1, 2, 3, X>
  //
  if (!isShuffleMaskConsecutive(SVOp,
            0,                   // Mask Start Index
            NumElems-NumZeros-1, // Mask End Index
            NumZeros,            // Where to start looking in the src vector
            NumElems,            // Number of elements in vector
            OpSrc))              // Which source operand ?
    return false;

  isLeft = false;
  ShAmt = NumZeros;
  ShVal = SVOp->getOperand(OpSrc);
  return true;
}

/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
/// logical left shift of a vector.
static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                              bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
                                          true /* check zeros from left */,
                                          DAG);
  unsigned OpSrc;

  if (!NumZeros)
    return false;

  // Considering the elements in the mask that are not consecutive zeros,
  // check if they consecutively come from only one of the source vectors.
  //
  //                           0    { A, B, X, X } = V2
  //                          /          \  /  /
  //   vector_shuffle V1, V2 <X, X, 4, 5>
  //
  if (!isShuffleMaskConsecutive(SVOp,
            NumZeros,     // Mask Start Index
            NumElems-1,   // Mask End Index
            0,            // Where to start looking in the src vector
            NumElems,     // Number of elements in vector
            OpSrc))       // Which source operand ?
    return false;

  isLeft = true;
  ShAmt = NumZeros;
  ShVal = SVOp->getOperand(OpSrc);
  return true;
}

/// isVectorShift - Returns true if the shuffle can be implemented as a
/// logical left or right shift of a vector.
static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
      isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
    return true;

  return false;
}
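// Worked example (illustrative): with V2 a zero vector, the v4i32 mask
// <1, 2, 3, 4> has one consecutive zero on the right (mask element 4 reads
// V2[0]) and elements 1..3 taken consecutively from V1 starting at index 1,
// so isVectorShiftRight matches it as a right shift of V1 by one element,
// which corresponds to a 4-byte whole-register byte shift.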
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, true, DAG, dl);
      else
        V = DAG.getUNDEF(MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDValue ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
                              MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.getNode())
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
                        DAG.getIntPtrConstant(i/2));
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  if (NumNonZero > 4)
    return SDValue();

  DebugLoc dl = Op.getDebugLoc();
  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, true, DAG, dl);
        else
          V = DAG.getUNDEF(MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
                      MVT::v8i16, V, Op.getOperand(i),
                      DAG.getIntPtrConstant(i));
    }
  }

  return V;
}

/// getVShift - Return a vector logical shift node.
///
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
                         unsigned NumBits, SelectionDAG &DAG,
                         const TargetLowering &TLI, DebugLoc dl) {
  EVT ShVT = MVT::v2i64;
  unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
  SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(Opc, dl, ShVT, SrcOp,
                             DAG.getConstant(NumBits,
                                  TLI.getShiftAmountTy(SrcOp.getValueType()))));
}
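// Usage note (illustrative): getVShift models a whole-register shift by a
// bit count. The operand is bitcast to v2i64, shifted with X86ISD::VSHL or
// X86ISD::VSRL, and bitcast back; shifting a v4i32 value left by one element
// therefore passes NumBits = 32.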
SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                          SelectionDAG &DAG) const {

  // Check if the scalar load can be widened into a vector load. And if
  // the address is "base + cst" see if the cst can be "absorbed" into
  // the shuffle mask.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
    SDValue Ptr = LD->getBasePtr();
    if (!ISD::isNormalLoad(LD) || LD->isVolatile())
      return SDValue();
    EVT PVT = LD->getValueType(0);
    if (PVT != MVT::i32 && PVT != MVT::f32)
      return SDValue();

    int FI = -1;
    int64_t Offset = 0;
    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
      FI = FINode->getIndex();
      Offset = 0;
    } else if (DAG.isBaseWithConstantOffset(Ptr) &&
               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
      Offset = Ptr.getConstantOperandVal(1);
      Ptr = Ptr.getOperand(0);
    } else {
      return SDValue();
    }

    // FIXME: 256-bit vector instructions don't require a strict alignment,
    // improve this code to support it better.
    unsigned RequiredAlign = VT.getSizeInBits()/8;
    SDValue Chain = LD->getChain();
    // Make sure the stack object alignment is at least 16 or 32.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
      if (MFI->isFixedObjectIndex(FI)) {
        // Can't change the alignment. FIXME: It's possible to compute the
        // exact stack offset and reference FI + adjust offset instead, if
        // someone *really* cares about this. That's the way to implement it.
        return SDValue();
      } else {
        MFI->setObjectAlignment(FI, RequiredAlign);
      }
    }

    // (Offset % RequiredAlign) must be a multiple of 4. The address is then
    // Ptr + (Offset & ~(RequiredAlign-1)).
    if (Offset < 0)
      return SDValue();
    if ((Offset % RequiredAlign) & 3)
      return SDValue();
    int64_t StartOffset = Offset & ~(RequiredAlign-1);
    if (StartOffset)
      Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(),
                        Ptr, DAG.getConstant(StartOffset, Ptr.getValueType()));

    int EltNo = (Offset - StartOffset) >> 2;
    int NumElems = VT.getVectorNumElements();

    EVT CanonVT = VT.getSizeInBits() == 128 ? MVT::v4i32 : MVT::v8i32;
    EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
    SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
                             LD->getPointerInfo().getWithOffset(StartOffset),
                             false, false, 0);

    // Canonicalize it to a v4i32 or v8i32 shuffle.
    SmallVector<int, 8> Mask;
    for (int i = 0; i < NumElems; ++i)
      Mask.push_back(EltNo);

    V1 = DAG.getNode(ISD::BITCAST, dl, CanonVT, V1);
    return DAG.getNode(ISD::BITCAST, dl, NVT,
                       DAG.getVectorShuffle(CanonVT, dl, V1,
                                            DAG.getUNDEF(CanonVT), &Mask[0]));
  }

  return SDValue();
}
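// Worked example (illustrative): splatting a 4-byte scalar loaded from a
// stack slot at FI + 8 into a v4f32. RequiredAlign is 16, StartOffset
// becomes 8 & ~15 = 0, and EltNo is (8 - 0) >> 2 = 2, so the whole slot is
// loaded as one 16-byte vector and the constant offset is absorbed into the
// splat mask <2, 2, 2, 2>.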
/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
/// vector of type 'VT', see if the elements can be replaced by a single large
/// load which has the same value as a build_vector whose operands are 'elts'.
///
/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
///
/// FIXME: we'd also like to handle the case where the last elements are zero
/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
/// There's even a handy isZeroNode for that purpose.
static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
                                        DebugLoc &DL, SelectionDAG &DAG) {
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = Elts.size();

  LoadSDNode *LDBase = NULL;
  unsigned LastLoadedElt = -1U;

  // For each element in the initializer, see if we've found a load or an
  // undef. If we don't find an initial load element, or later load elements
  // are non-consecutive, bail out.
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Elts[i];

    if (!Elt.getNode() ||
        (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
      return SDValue();
    if (!LDBase) {
      if (Elt.getNode()->getOpcode() == ISD::UNDEF)
        return SDValue();
      LDBase = cast<LoadSDNode>(Elt.getNode());
      LastLoadedElt = i;
      continue;
    }
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;

    LoadSDNode *LD = cast<LoadSDNode>(Elt);
    if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
      return SDValue();
    LastLoadedElt = i;
  }

  // If we have found an entire vector of loads and undefs, then return a large
  // load of the entire vector width starting at the base pointer. If we found
  // consecutive loads for the low half, generate a vzext_load node.
  if (LastLoadedElt == NumElems - 1) {
    if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
      return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
                         LDBase->getPointerInfo(),
                         LDBase->isVolatile(), LDBase->isNonTemporal(), 0);
    return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
                       LDBase->getPointerInfo(),
                       LDBase->isVolatile(), LDBase->isNonTemporal(),
                       LDBase->getAlignment());
  } else if (NumElems == 4 && LastLoadedElt == 1 &&
             DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
    SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
    SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
    SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys,
                                              Ops, 2, MVT::i32,
                                              LDBase->getMemOperand());
    return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  EVT VT = Op.getValueType();
  EVT ExtVT = VT.getVectorElementType();
  unsigned NumElems = Op.getNumOperands();

  // Vectors containing all zeros can be matched by pxor and xorps later
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
    // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
    if (Op.getValueType() == MVT::v4i32 ||
        Op.getValueType() == MVT::v8i32)
      return Op;

    return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
  }

  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
  // vectors or broken into v4i32 operations on 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (Op.getValueType() == MVT::v4i32)
      return Op;

    return getOnesVector(Op.getValueType(), DAG, dl);
  }

  unsigned EVTBits = ExtVT.getSizeInBits();

  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDValue, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
    if (Elt.getOpcode() != ISD::Constant &&
        Elt.getOpcode() != ISD::ConstantFP)
      IsAllConstants = false;
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }

  // All-undef vector. Return an UNDEF. All zero vectors were handled above.
  if (NumNonZero == 0)
    return DAG.getUNDEF(VT);

  // Special case for a single non-zero, non-undef element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDValue Item = Op.getOperand(Idx);

    // If this is an insertion of an i64 value on x86-32, and if the top bits
    // of the value are obviously zero, truncate the value to i32 and do the
    // insertion that way. Only do this if the value is non-constant or if the
    // value is a constant being inserted into element 0. It is cheaper to do
    // a constant pool load than it is to do a movd + shuffle.
    if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
        (!IsAllConstants || Idx == 0)) {
      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
        // Handle SSE only.
        assert(VT == MVT::v2i64 && "Expected an SSE value type!");
        EVT VecVT = MVT::v4i32;
        unsigned VecElts = 4;

        // Truncate the value (which may itself be a constant) to i32, and
        // convert it to a vector with movd (S2V+shuffle to zero extend).
        Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
        Item = getShuffleVectorZeroOrUndef(Item, 0, true,
                                           Subtarget->hasSSE2(), DAG);

        // Now we have our 32-bit value zero extended in the low element of
        // a vector. If Idx != 0, swizzle it into place.
        if (Idx != 0) {
          SmallVector<int, 4> Mask;
          Mask.push_back(Idx);
          for (unsigned i = 1; i != VecElts; ++i)
            Mask.push_back(i);
          Item = DAG.getVectorShuffle(VecVT, dl, Item,
                                      DAG.getUNDEF(Item.getValueType()),
                                      &Mask[0]);
        }
        return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
      }
    }

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements. This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0) {
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
      } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
                 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(),
                                           DAG);
      } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
        assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
        EVT MiddleVT = MVT::v4i32;
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
        Item = getShuffleVectorZeroOrUndef(Item, 0, true,
                                           Subtarget->hasSSE2(), DAG);
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // Is it a vector logical left shift?
    if (NumElems == 2 && Idx == 1 &&
        X86::isZeroNode(Op.getOperand(0)) &&
        !X86::isZeroNode(Op.getOperand(1))) {
      unsigned NumBits = VT.getSizeInBits();
      return getVShift(true, VT,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
                                         Subtarget->hasSSE2(), DAG);
      SmallVector<int, 8> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(i == Idx ? 0 : 1);
      return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1) {
    if (EVTBits == 32) {
      // Instead of a shuffle like this:
      // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
      // Check if it's possible to issue this instead.
      // shuffle (vload ptr)), undef, <1, 1, 1, 1>
      unsigned Idx = CountTrailingZeros_32(NonZeros);
      SDValue Item = Op.getOperand(Idx);
      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();

  // For AVX-length vectors, build the individual 128-bit pieces and use
  // shuffles to put them in place.
  if (VT.getSizeInBits() == 256 && !ISD::isBuildVectorAllZeros(Op.getNode())) {
    SmallVector<SDValue, 32> V;
    for (unsigned i = 0; i < NumElems; ++i)
      V.push_back(Op.getOperand(i));

    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);

    // Build both the lower and upper subvector.
    SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2);
    SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2],
                                NumElems/2);

    // Recreate the wider vector with the lower and upper part.
    SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower,
                                     DAG.getConstant(0, MVT::i32), DAG, dl);
    return Insert128BitVector(Vec, Upper,
                              DAG.getConstant(NumElems/2, MVT::i32), DAG, dl);
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64) {
    if (NumNonZero == 1) {
      // One half is zero or undef.
      unsigned Idx = CountTrailingZeros_32(NonZeros);
      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
                               Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true,
                                         Subtarget->hasSSE2(), DAG);
    }
    return SDValue();
  }

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      *this);
    if (V.getNode()) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      *this);
    if (V.getNode()) return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDValue, 8> V;
  V.resize(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
      default: break;
      case 0:
        V[i] = V[i*2];  // Must be a zero vector.
        break;
      case 1:
        V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
        break;
      case 2:
        V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      case 3:
        V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      }
    }

    SmallVector<int, 8> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      MaskVec.push_back(Reverse ? 1-i : i);
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems);
    return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
  }

  if (Values.size() > 1 && VT.getSizeInBits() == 128) {
    // Check for a build vector of consecutive loads.
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = Op.getOperand(i);

    // Check for elements which are consecutive loads.
    SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
    if (LD.getNode())
      return LD;

    // For SSE 4.1, use insertps to put the high elements into the low element.
    if (getSubtarget()->hasSSE41()) {
      SDValue Result;
      if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
        Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
      else
        Result = DAG.getUNDEF(VT);

      for (unsigned i = 1; i < NumElems; ++i) {
        if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
        Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                             Op.getOperand(i), DAG.getIntPtrConstant(i));
      }
      return Result;
    }

    // Otherwise, expand into a number of unpckl*, start by extending each of
    // our (non-undef) elements to the full vector width with the element in
    // the bottom slot of the vector (which generates no code for SSE).
    for (unsigned i = 0; i < NumElems; ++i) {
      if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
      else
        V[i] = DAG.getUNDEF(VT);
    }

    // Next, we iteratively mix elements, e.g. for v4f32:
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    unsigned EltStride = NumElems >> 1;
    while (EltStride != 0) {
      for (unsigned i = 0; i < EltStride; ++i) {
        // If V[i+EltStride] is undef and this is the first round of mixing,
        // then it is safe to just drop this shuffle: V[i] is already in the
        // right place, the one element (since it's the first round) being
        // inserted as undef can be dropped. This isn't safe for successive
        // rounds because they will permute elements within both vectors.
        if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
            EltStride == NumElems/2)
          continue;

        V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
      }
      EltStride >>= 1;
    }
    return V[0];
  }
  return SDValue();
}

// LowerMMXCONCAT_VECTORS - We support concatenating two MMX registers and
// placing them in an MMX register. This is better than doing a stack convert.
static SDValue LowerMMXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  EVT ResVT = Op.getValueType();

  assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
         ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
  int Mask[2];
  SDValue InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Op.getOperand(0));
  SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
  InVec = Op.getOperand(1);
  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    unsigned NumElts = ResVT.getVectorNumElements();
    VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
    VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
                        InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
  } else {
    InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
    SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
    Mask[0] = 0; Mask[1] = 2;
    VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
  }
  return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
}

// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  EVT ResVT = Op.getValueType();

  assert(ResVT.getSizeInBits() == 256 && "Value type must be 256-bit wide");

  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = ResVT.getVectorNumElements();

  SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, ResVT), V1,
                                 DAG.getConstant(0, MVT::i32), DAG, dl);
  return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32),
                            DAG, dl);
}
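// Worked example (illustrative): concatenating two v4f32 values into a v8f32
// becomes two Insert128BitVector calls into an undef v8f32, one at element
// index 0 and one at index 4 (NumElems/2); the second insertion is what the
// patterns later match as vinsertf128 with immediate 1.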
SDValue
X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  EVT ResVT = Op.getValueType();

  assert(Op.getNumOperands() == 2);
  assert((ResVT.getSizeInBits() == 128 || ResVT.getSizeInBits() == 256) &&
         "Unsupported CONCAT_VECTORS for value type");

  // We support concatenating two MMX registers and placing them in an MMX
  // register. This is better than doing a stack convert.
  if (ResVT.is128BitVector())
    return LowerMMXCONCAT_VECTORS(Op, DAG);

  // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.
  return LowerAVXCONCAT_VECTORS(Op, DAG);
}

// v8i16 shuffles - Prefer shuffles in the following order:
//  1. [all]   pshuflw, pshufhw, optional move
//  2. [ssse3] 1 x pshufb
//  3. [ssse3] 2 x pshufb + 1 x por
//  4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
                                            SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high
  // quadwords of the result come from the same quadword of one of the two
  // inputs. Undef mask values count as coming from any quadword, for better
  // codegen.
  SmallVector<unsigned, 4> LoQuad(4);
  SmallVector<unsigned, 4> HiQuad(4);
  BitVector InputQuads(4);
  for (unsigned i = 0; i < 8; ++i) {
    SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }

  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, if all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads.find_first();
      BestHiQuad = InputQuads.find_next(BestLoQuad);
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }

  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    SmallVector<int, 8> MaskV;
    MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
    MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()):
                             X86::getShufflePSHUFLWImmediate(NewV.getNode());
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    for (unsigned i = 0; i != 8; ++i) {
      int EltIdx = MaskVals[i] * 2;
      if (TwoInputs && (EltIdx >= 16)) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
    }
    V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 8; ++i) {
      int EltIdx = MaskVals[i] * 2;
      if (EltIdx < 16) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
      pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
    }
    V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }

  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  BitVector InOrder(8);
  if (BestLoQuad >= 0) {
    SmallVector<int, 8> MaskV;
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        MaskV.push_back(-1);
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV.push_back(idx & 3);
        InOrder.set(i);
      } else {
        MaskV.push_back(-1);
      }
    }
    for (unsigned i = 4; i != 8; ++i)
      MaskV.push_back(i);
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                               NewV.getOperand(0),
                               X86::getShufflePSHUFLWImmediate(NewV.getNode()),
                               DAG);
  }

  // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    SmallVector<int, 8> MaskV;
    for (unsigned i = 0; i != 4; ++i)
      MaskV.push_back(i);
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        MaskV.push_back(-1);
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV.push_back((idx & 3) + 4);
        InOrder.set(i);
      } else {
        MaskV.push_back(-1);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                               NewV.getOperand(0),
                               X86::getShufflePSHUFHWImmediate(NewV.getNode()),
                               DAG);
  }

  // In case BestHiQuad and BestLoQuad were both -1, which means each quadword
  // has a word from each of the four input quadwords, calculate the InOrder
  // bitvector now before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8)
      ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                    DAG.getIntPtrConstant(EltIdx))
      : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                    DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

// v16i8 shuffles - Prefer shuffles in the following order:
//  1. [ssse3] 1 x pshufb
//  2. [ssse3] 2 x pshufb + 1 x por
//  3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static
SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG,
                                 const X86TargetLowering &TLI) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  SmallVector<int, 16> MaskVals;
  SVOp->getMask(MaskVals);

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.
  // FIXME: kill V2Only once shuffles are canonicalized by getNode.
  bool V1Only = true;
  bool V2Only = true;
  for (unsigned i = 0; i < 16; ++i) {
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    if (EltIdx < 16)
      V2Only = false;
    else
      V1Only = false;
  }

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (TLI.getSubtarget()->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    bool TwoInputs = !(V1Only || V2Only);
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    // If all the elements are from V2, assign it to V1 and return after
    // building the first pshufb.
    if (V2Only)
      V1 = V2;
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    if (!TwoInputs)
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 16) {
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
        continue;
      }
      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, &pshufbMask[0], 16));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }

  // No SSSE3 - Calculate in-place words and then fix all out-of-place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V2Only ? V2 : V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1))
      continue;
    if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined and consecutive, and can be loaded
    // together with a single extract, do so and insert the resulting word.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits,
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}
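// Worked example (illustrative): without SSSE3, a v16i8 mask asking word i of
// the result to hold bytes <9, 8> extracts word 4 twice, shifts one copy left
// by 8 (even byte 8 becomes the high byte) and the other right by 8, ORs
// them, and inserts the swapped word, which is the "pextrw + rotate + pinsrw"
// pattern named in the comment above.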
  EVT NewVT;
  switch (VT.getSimpleVT().SimpleTy) {
  default: assert(false && "Unexpected!");
  case MVT::v4f32: NewVT = MVT::v2f64; break;
  case MVT::v4i32: NewVT = MVT::v2i64; break;
  case MVT::v8i16: NewVT = MVT::v4i32; break;
  case MVT::v16i8: NewVT = MVT::v4i32; break;
  }

  int Scale = NumElems / NewWidth;
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i < NumElems; i += Scale) {
    int StartIdx = -1;
    for (int j = 0; j < Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx == -1)
        StartIdx = EltIdx - (EltIdx % Scale);
      if (EltIdx != StartIdx + j)
        return SDValue();
    }
    if (StartIdx == -1)
      MaskVec.push_back(-1);
    else
      MaskVec.push_back(StartIdx / Scale);
  }

  V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(EVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, DebugLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// areShuffleHalvesWithinDisjointLanes - Check whether each half of a vector
/// shuffle node refers to only one lane in the sources.
static bool areShuffleHalvesWithinDisjointLanes(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  int NumElems = VT.getVectorNumElements();
  int HalfSize = NumElems/2;
  SmallVector<int, 16> M;
  SVOp->getMask(M);
  bool MatchA = false, MatchB = false;

  for (int l = 0; l < NumElems*2; l += HalfSize) {
    if (isUndefOrInRange(M, 0, HalfSize, l, l+HalfSize)) {
      MatchA = true;
      break;
    }
  }

  for (int l = 0; l < NumElems*2; l += HalfSize) {
    if (isUndefOrInRange(M, HalfSize, HalfSize, l, l+HalfSize)) {
      MatchB = true;
      break;
    }
  }

  return MatchA && MatchB;
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  if (areShuffleHalvesWithinDisjointLanes(SVOp)) {
    // If each half of a vector shuffle node refers to only one lane in the
    // source vectors, extract each used 128-bit lane and shuffle them using
    // 128-bit shuffles. Then, concatenate the results. Otherwise leave
    // the work to the legalizer.
    DebugLoc dl = SVOp->getDebugLoc();
    EVT VT = SVOp->getValueType(0);
    int NumElems = VT.getVectorNumElements();
    int HalfSize = NumElems/2;

    // Extract the reference for each half
    int FstVecExtractIdx = 0, SndVecExtractIdx = 0;
    int FstVecOpNum = 0, SndVecOpNum = 0;
    for (int i = 0; i < HalfSize; ++i) {
      int Elt = SVOp->getMaskElt(i);
      if (SVOp->getMaskElt(i) < 0)
        continue;
      FstVecOpNum = Elt/NumElems;
      FstVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize;
      break;
    }
    for (int i = HalfSize; i < NumElems; ++i) {
      int Elt = SVOp->getMaskElt(i);
      if (SVOp->getMaskElt(i) < 0)
        continue;
      SndVecOpNum = Elt/NumElems;
      SndVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize;
      break;
    }

    // Extract the subvectors
    SDValue V1 = Extract128BitVector(SVOp->getOperand(FstVecOpNum),
                      DAG.getConstant(FstVecExtractIdx, MVT::i32), DAG, dl);
    SDValue V2 = Extract128BitVector(SVOp->getOperand(SndVecOpNum),
                      DAG.getConstant(SndVecExtractIdx, MVT::i32), DAG, dl);

    // Generate 128-bit shuffles
    SmallVector<int, 16> MaskV1, MaskV2;
    for (int i = 0; i < HalfSize; ++i) {
      int Elt = SVOp->getMaskElt(i);
      MaskV1.push_back(Elt < 0 ? Elt : Elt % HalfSize);
    }
    for (int i = HalfSize; i < NumElems; ++i) {
      int Elt = SVOp->getMaskElt(i);
      MaskV2.push_back(Elt < 0 ? Elt : Elt % HalfSize);
    }

    EVT NVT = V1.getValueType();
    V1 = DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &MaskV1[0]);
    V2 = DAG.getVectorShuffle(NVT, dl, V2, DAG.getUNDEF(NVT), &MaskV2[0]);

    // Concatenate the result back
    SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), V1,
                                   DAG.getConstant(0, MVT::i32), DAG, dl);
    return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32),
                              DAG, dl);
  }

  return SDValue();
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  DebugLoc dl = SVOp->getDebugLoc();
  EVT VT = SVOp->getValueType(0);

  assert(VT.getSizeInBits() == 128 && "Unsupported vector size");

  SmallVector<std::pair<int, int>, 8> Locs;
  Locs.resize(4);
  SmallVector<int, 8> Mask1(4U, -1);
  SmallVector<int, 8> PermMask;
  SVOp->getMask(PermMask);

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles.  The first shuffle gathers the elements.
    // The second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    SmallVector<int, 8> Mask2(4U, -1);

    for (unsigned i = 0; i != 4; ++i) {
      if (Locs[i].first == -1)
        continue;
      else {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }
    }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  } else if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y.  First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter).  Then, use a shufps to build the final vector, taking
    // the half containing the element from Y from the intermediate, and the
    // other half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, VT);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    } else {
      Mask1[0] = HiIndex & 1 ? 2 : 0;
      Mask1[1] = HiIndex & 1 ? 0 : 2;
      Mask1[2] = PermMask[2];
      Mask1[3] = PermMask[3];
      if (Mask1[2] >= 0)
        Mask1[2] += 4;
      if (Mask1[3] >= 0)
        Mask1[3] += 4;
      return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
    }
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
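  // Worked example (added for illustration, not from a specific test case):
  // for PermMask <0,4,1,6>, the loop below builds LoMask = <0,-1,4,-1> and
  // HiMask = <1,-1,6,-1>.  The final shuffle of LoShuffle and HiShuffle then
  // uses MaskOps = <0,2,4,6> to reassemble the requested result.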
  Locs.clear();
  Locs.resize(4);
  SmallVector<int,8> LoMask(4U, -1);
  SmallVector<int,8> HiMask(4U, -1);

  SmallVector<int,8> *MaskPtr = &LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = &HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      (*MaskPtr)[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      (*MaskPtr)[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  SmallVector<int, 8> MaskOps;
  for (unsigned i = 0; i != 4; ++i) {
    if (Locs[i].first == -1) {
      MaskOps.push_back(-1);
    } else {
      unsigned Idx = Locs[i].first * 4 + Locs[i].second;
      MaskOps.push_back(Idx);
    }
  }
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (MayFoldLoad(V))
    return true;
  return false;
}

// FIXME: the version above should always be used.  Since there's
// a bug where several vector shuffles can't be folded because the
// DAG is not updated during lowering and a node claims to have two
// uses while it only has one, use this version, and let isel match
// another instruction if the load really happens to have more than
// one use.  Remove this version after this bug gets fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (ISD::isNormalLoad(V.getNode()))
    return true;
  return false;
}

/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by
/// a vector extract, and if both can be later optimized into a single load.
/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked
/// here because otherwise a target specific shuffle node would be emitted
/// for this shuffle, and the optimization would not be done.
/// FIXME: This is probably not the best approach, but works around the
/// problem until the right path is decided.
static
bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
                                         const TargetLowering &TLI) {
  EVT VT = V.getValueType();
  ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);

  // Be sure that the vector shuffle is present in a pattern like this:
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
  if (!V.hasOneUse())
    return false;

  SDNode *N = *V.getNode()->use_begin();
  if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;

  SDValue EltNo = N->getOperand(1);
  if (!isa<ConstantSDNode>(EltNo))
    return false;

  // If the bit convert changed the number of elements, it is unsafe
  // to examine the mask.
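  // (Illustration, added for exposition: after a bitcast from v8i16 to v4i32
  // each i32 element covers two i16 elements, so a v4i32 mask index can no
  // longer be mapped back to a single source element.)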
  bool HasShuffleIntoBitcast = false;
  if (V.getOpcode() == ISD::BITCAST) {
    EVT SrcVT = V.getOperand(0).getValueType();
    if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
      return false;
    V = V.getOperand(0);
    HasShuffleIntoBitcast = true;
  }

  // Select the input vector, guarding against out of range extract vector.
  unsigned NumElems = VT.getVectorNumElements();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
  V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);

  // Skip one more bit_convert if necessary
  if (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (ISD::isNormalLoad(V.getNode())) {
    // Is the original load suitable?
    LoadSDNode *LN0 = cast<LoadSDNode>(V);

    // FIXME: avoid the multi-use bug that is preventing lots of foldings
    // from being detected.  This is still wrong of course, but it gives the
    // temporarily desired behavior; if the load really has more uses, it
    // will not fold during isel and will generate poor code.
    if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
      return false;

    if (!HasShuffleIntoBitcast)
      return true;

    // If there's a bitcast before the shuffle, check if the load type and
    // alignment is valid.
    unsigned Align = LN0->getAlignment();
    unsigned NewAlign =
      TLI.getTargetData()->getABITypeAlignment(
                                    VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
      return false;
  }

  return true;
}

static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32
  return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static inline unsigned getSHUFPOpcode(EVT VT) {
  switch(VT.getSimpleVT().SimpleTy) {
  case MVT::v8i32: // Use fp unit for int shuffle.
  case MVT::v8f32:
  case MVT::v4i32: // Use fp unit for int shuffle.
  case MVT::v4f32: return X86ISD::SHUFPS;
  case MVT::v4i64: // Use fp unit for int shuffle.
  case MVT::v4f64:
  case MVT::v2i64: // Use fp unit for int shuffle.
  case MVT::v2f64: return X86ISD::SHUFPD;
  default:
    llvm_unreachable("Unknown type for shufp*");
  }
  return 0;
}

static
SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  // turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  // Both of them can't be memory operations though.
  if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
    CanFoldLoad = false;

  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp
  // load folding logic (see the code above the getMOVLP call). Match it here
  // then; this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp))
    return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
  else if (HasSSE2)
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);


  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V2, V1,
                              X86::getShuffleSHUFImmediate(SVOp), DAG);
}

static inline unsigned getUNPCKLOpcode(EVT VT) {
  switch(VT.getSimpleVT().SimpleTy) {
  case MVT::v4i32: return X86ISD::PUNPCKLDQ;
  case MVT::v2i64: return X86ISD::PUNPCKLQDQ;
  case MVT::v4f32: return X86ISD::UNPCKLPS;
  case MVT::v2f64: return X86ISD::UNPCKLPD;
  case MVT::v8i32: // Use fp unit for int unpack.
  case MVT::v8f32: return X86ISD::VUNPCKLPSY;
  case MVT::v4i64: // Use fp unit for int unpack.
  case MVT::v4f64: return X86ISD::VUNPCKLPDY;
  case MVT::v16i8: return X86ISD::PUNPCKLBW;
  case MVT::v8i16: return X86ISD::PUNPCKLWD;
  default:
    llvm_unreachable("Unknown type for unpckl");
  }
  return 0;
}

static inline unsigned getUNPCKHOpcode(EVT VT) {
  switch(VT.getSimpleVT().SimpleTy) {
  case MVT::v4i32: return X86ISD::PUNPCKHDQ;
  case MVT::v2i64: return X86ISD::PUNPCKHQDQ;
  case MVT::v4f32: return X86ISD::UNPCKHPS;
  case MVT::v2f64: return X86ISD::UNPCKHPD;
  case MVT::v8i32: // Use fp unit for int unpack.
  case MVT::v8f32: return X86ISD::VUNPCKHPSY;
  case MVT::v4i64: // Use fp unit for int unpack.
  case MVT::v4f64: return X86ISD::VUNPCKHPDY;
  case MVT::v16i8: return X86ISD::PUNPCKHBW;
  case MVT::v8i16: return X86ISD::PUNPCKHWD;
  default:
    llvm_unreachable("Unknown type for unpckh");
  }
  return 0;
}

static inline unsigned getVPERMILOpcode(EVT VT) {
  switch(VT.getSimpleVT().SimpleTy) {
  case MVT::v4i32:
  case MVT::v4f32: return X86ISD::VPERMILPS;
  case MVT::v2i64:
  case MVT::v2f64: return X86ISD::VPERMILPD;
  case MVT::v8i32:
  case MVT::v8f32: return X86ISD::VPERMILPSY;
  case MVT::v4i64:
  case MVT::v4f64: return X86ISD::VPERMILPDY;
  default:
    llvm_unreachable("Unknown type for vpermil");
  }
  return 0;
}

/// isVectorBroadcast - Check if the node chain is suitable to be xformed to
/// a vbroadcast node. The nodes are suitable whenever we can fold a load
/// coming from a 32 or 64 bit scalar. Update Op to the desired load to be
/// folded.
static bool isVectorBroadcast(SDValue &Op) {
  EVT VT = Op.getValueType();
  bool Is256 = VT.getSizeInBits() == 256;

  assert((VT.getSizeInBits() == 128 || Is256) &&
         "Unsupported type for vbroadcast node");

  SDValue V = Op;
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (Is256 && !(V.hasOneUse() &&
                 V.getOpcode() == ISD::INSERT_SUBVECTOR &&
                 V.getOperand(0).getOpcode() == ISD::UNDEF))
    return false;

  if (Is256)
    V = V.getOperand(1);
  if (V.hasOneUse() && V.getOpcode() != ISD::SCALAR_TO_VECTOR)
    return false;

  // Check the source scalar_to_vector type. 256-bit broadcasts are
  // supported for 32/64-bit sizes, while 128-bit ones are only supported
  // for 32-bit scalars.
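  // (Illustration, added for exposition: a v8f32 splat of a loaded float can
  // become a vbroadcastss, but a v2f64 splat of a loaded double cannot,
  // because AVX only provides vbroadcastsd in a 256-bit form; such splats are
  // left to the other shuffle paths, e.g. movddup.)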
  unsigned ScalarSize = V.getOperand(0).getValueType().getSizeInBits();
  if (ScalarSize != 32 && ScalarSize != 64)
    return false;
  if (!Is256 && ScalarSize == 64)
    return false;

  V = V.getOperand(0);
  if (!MayFoldLoad(V))
    return false;

  // Return the load node
  Op = V;
  return true;
}

static
SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
                               const TargetLowering &TLI,
                               const X86Subtarget *Subtarget) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);

  // Handle splat operations
  if (SVOp->isSplat()) {
    unsigned NumElem = VT.getVectorNumElements();
    int Size = VT.getSizeInBits();
    // Special case: this is the only place now where it's allowed to return
    // a vector_shuffle operation without using a target specific node, because
    // *hopefully* it will be optimized away by the dag combiner. FIXME: should
    // this be moved to DAGCombine instead?
    if (NumElem <= 4 && CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
      return Op;

    // Use vbroadcast whenever the splat comes from a foldable load
    if (Subtarget->hasAVX() && isVectorBroadcast(V1))
      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, V1);

    // Handle splats by matching through known shuffle masks
    if ((Size == 128 && NumElem <= 4) ||
        (Size == 256 && NumElem < 8))
      return SDValue();

    // All remaining splats are promoted to target supported vector shuffles.
    return PromoteSplat(SVOp, DAG);
  }

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if ((VT == MVT::v4i32 ||
              (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
    // FIXME: Figure out a cleaner way to do this.
    // Try to make use of movq to zero out the top part.
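    // (Illustration, added for exposition: (v4i32 shuffle X, zerovec,
    // <0,1,6,7>) narrows to the v2i64 mask <0,3>, a commuted MOVL, and can
    // be emitted as a zero-extending movq of X's low quadword.)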
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode()) {
        if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
          return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
                              DAG, Subtarget, dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
      if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
        return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
                            DAG, Subtarget, dl);
    }
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned NumElems = VT.getVectorNumElements();
  bool isMMX = VT.getSizeInBits() == 64;
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
  bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
  bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  // Shuffle operations on MMX are not supported.
  if (isMMX)
    return Op;

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    shuffle nodes.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel, all of them must be converted to a target specific
  // node here.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
  if (NewOp.getNode())
    return NewOp;

  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
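  // (Illustration, added for exposition: the splat mask <0,0,1,1> is both an
  // unpckl_undef and a pshufd mask; punpckldq %xmm0, %xmm0 is typically one
  // byte shorter to encode than pshufd $0x50, %xmm0, %xmm0, so unpck wins
  // when optimizing for size.)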
  if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
    return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
  if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef &&
      RelaxedMayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (X86::isMOVHLPS_v_undef_Mask(SVOp))
    return getMOVHighToLow(Op, dl, DAG);

  // Used to match splats
  if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  if (X86::isPSHUFDMask(SVOp)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only pshufd
    // as its name says, sad but true, emulate the behavior for now...
    if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V1,
                                TargetMask, DAG);
  }

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = getSubtarget()->hasSSE2() &&
    isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (X86::isMOVLMask(SVOp)) {
    if (V1IsUndef)
      return V2;
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!X86::isMOVLPMask(SVOp)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (X86::isMOVHLPSMask(SVOp))
    return getMOVHighToLow(Op, dl, DAG);

  if (X86::isMOVSHDUPMask(SVOp, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (X86::isMOVSLDUPMask(SVOp, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (X86::isMOVLPMask(SVOp))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(SVOp) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshl / vsrl.
    EVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat?  Be careful not to
  // accept 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
    Op = CommuteVectorShuffle(SVOp, DAG);
    SVOp = cast<ShuffleVectorSDNode>(Op);
    V1 = SVOp->getOperand(0);
    V2 = SVOp->getOperand(1);
    std::swap(V1IsSplat, V2IsSplat);
    std::swap(V1IsUndef, V2IsUndef);
    Commuted = true;
  }

  if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
    // Shuffling low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (X86::isUNPCKLMask(SVOp))
    return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);

  if (X86::isUNPCKHMask(SVOp))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDValue NewMask = NormalizeMask(SVOp, DAG);
    ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
    if (NSVOp != SVOp) {
      if (X86::isUNPCKLMask(NSVOp, true)) {
        return NewMask;
      } else if (X86::isUNPCKHMask(NSVOp, true)) {
        return NewMask;
      }
    }
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
    ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);

    if (X86::isUNPCKLMask(NewSVOp))
      return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);

    if (X86::isUNPCKHMask(NewSVOp))
      return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
    return CommuteVectorShuffle(SVOp, DAG);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target specific
  // nodes, and remove one by one until they don't return Op anymore.
  SmallVector<int, 16> M;
  SVOp->getMask(M);

  if (isPALIGNRMask(M, VT, HasSSSE3))
    return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
                                X86::getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64)
      return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG);
    if (VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                X86::getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                X86::getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  if (isSHUFPMask(M, VT))
    return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
                                X86::getShuffleSHUFImmediate(SVOp), DAG);

  if (X86::isUNPCKL_v_undef_Mask(SVOp))
    return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
  if (X86::isUNPCKH_v_undef_Mask(SVOp))
    return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);

  //===--------------------------------------------------------------------===//
  // Generate target specific nodes for 128 or 256-bit shuffles only
  // supported in the AVX instruction set.
  //

  // Handle VPERMILPS* permutations
  if (isVPERMILPSMask(M, VT, Subtarget))
    return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
                                getShuffleVPERMILPSImmediate(SVOp), DAG);

  // Handle VPERMILPD* permutations
  if (isVPERMILPDMask(M, VT, Subtarget))
    return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
                                getShuffleVPERMILPDImmediate(SVOp), DAG);

  // Handle VPERM2F128 permutations
  if (isVPERM2F128Mask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::VPERM2F128, dl, VT, V1, V2,
                                getShuffleVPERM2F128Immediate(SVOp), DAG);

  // Handle VSHUFPSY permutations
  if (isVSHUFPSYMask(M, VT, Subtarget))
    return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
                                getShuffleVSHUFPSYImmediate(SVOp), DAG);

  // Handle VSHUFPDY permutations
  if (isVSHUFPDYMask(M, VT, Subtarget))
    return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
                                getShuffleVSHUFPDYImmediate(SVOp), DAG);

  //===--------------------------------------------------------------------===//
  // Since no target specific shuffle was selected for this generic one,
  // lower it into other known shuffles. FIXME: this isn't true yet, but
  // this is the plan.
  //

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 128-bit wide vectors with 4 elements, and match them with
  // several different shuffle types.
  if (NumElems == 4 && VT.getSizeInBits() == 128)
    return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);

  // Handle general 256-bit shuffles
  if (VT.is256BitVector())
    return LowerVECTOR_SHUFFLE_256(SVOp, DAG);

  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getOperand(0).getValueType().getSizeInBits() != 128)
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32.  And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
  } else if (VT == MVT::i32) {
    // ExtractPS works with constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}


SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  EVT VecVT = Vec.getValueType();

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.getSizeInBits() == 256) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    unsigned NumElems = VecVT.getVectorNumElements();
    SDValue Idx = Op.getOperand(1);
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Get the 128-bit vector.
    bool Upper = IdxVal >= NumElems/2;
    Vec = Extract128BitVector(Vec,
                    DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32), DAG, dl);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                    Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx);
  }

  assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length");

  if (Subtarget->hasSSE41() || Subtarget->hasAVX()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    EVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  } else if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  } else if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    EVT VVT = Op.getOperand(0).getValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Op.getDebugLoc();

  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (VT.getSizeInBits() == 256)
    return SDValue();

  if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
      isa<ConstantSDNode>(N2)) {
    unsigned Opc;
    if (VT == MVT::v8i16)
      Opc = X86ISD::PINSRW;
    else if (VT == MVT::v16i8)
      Opc = X86ISD::PINSRB;
    else
      Opc = X86ISD::PINSRB;

    // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
    // argument.
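    // (Illustration, added for exposition: (insert_vector_elt v8i16:x, i16:y,
    // 3) becomes (PINSRW x, (any_extend y to i32), 3); the instruction ignores
    // the high bits of the GR32 operand.)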
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
    // Bits [7:6] of the constant are the source select. This will always be
    //  zero here. The DAG Combiner may combine an extract_elt index into these
    //  bits. For example (insert (extract, 3), 2) could be matched by putting
    //  the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    //  value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    //  combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    // Create this as a scalar to vector.
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
    // PINSR* works with constant index.
    return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();

  DebugLoc dl = Op.getDebugLoc();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  // If this is a 256-bit vector result, first extract the 128-bit vector,
  // insert the element into the extracted half and then place it back.
  if (VT.getSizeInBits() == 256) {
    if (!isa<ConstantSDNode>(N2))
      return SDValue();

    // Get the desired 128-bit vector half.
    unsigned NumElems = VT.getVectorNumElements();
    unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
    bool Upper = IdxVal >= NumElems/2;
    SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32);
    SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl);

    // Insert the element into the desired half.
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V,
                 N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2);

    // Insert the changed part back to the 256-bit vector
    return Insert128BitVector(N0, V, Ins128Idx, DAG, dl);
  }

  if (Subtarget->hasSSE41() || Subtarget->hasAVX())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EltVT == MVT::i8)
    return SDValue();

  if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT OpVT = Op.getValueType();

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (OpVT.getSizeInBits() > 128) {
    // Insert into a 128-bit vector.
    EVT VT128 = EVT::getVectorVT(*Context,
                                 OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / 2);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op,
                              DAG.getConstant(0, MVT::i32),
                              DAG, dl);
  }

  if (Op.getValueType() == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
         "Expected an SSE type!");
  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
}

// Lower a node with an EXTRACT_SUBVECTOR opcode.  This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
SDValue
X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue Idx = Op.getNode()->getOperand(1);

    if (Op.getNode()->getValueType(0).getSizeInBits() == 128
        && Vec.getNode()->getValueType(0).getSizeInBits() == 256) {
      return Extract128BitVector(Vec, Idx, DAG, dl);
    }
  }
  return SDValue();
}

// Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
SDValue
X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue SubVec = Op.getNode()->getOperand(1);
    SDValue Idx = Op.getNode()->getOperand(2);

    if (Op.getNode()->getValueType(0).getSizeInBits() == 256
        && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) {
      return Insert128BitVector(Vec, SubVec, Idx, DAG, dl);
    }
  }
  return SDValue();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
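// (Illustration, added for exposition: a constant pool reference is emitted
// as (X86ISD::Wrapper (TargetConstantPool $cp)), and in 32-bit PIC mode as
// (add (X86ISD::GlobalBaseReg), (X86ISD::Wrapper (TargetConstantPool $cp))),
// which is exactly the shape the functions below construct.)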
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);

  return Result;
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  DebugLoc DL = Op.getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);


  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, 0);

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = getTargetMachine().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
                                       /*isTarget=*/true, OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                      int64_t Offset,
                                      SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
  CodeModel::Model M = getTargetMachine().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
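  // (Illustration, added for exposition: a reference to GV+8 whose offset
  // cannot be folded, e.g. because GV needs a GOT load, ends up as
  // (add (load (Wrapper TGA:GV)), 8).)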
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  DebugLoc dl = GA->getDebugLoc();
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
  }

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
  MFI->setAdjustsStack(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  DebugLoc dl = GA->getDebugLoc();  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               DebugLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit) {
  DebugLoc dl = GA->getDebugLoc();

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr), false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
  // initialexec.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
                   X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (is64Bit) {
    assert(model == TLSModel::InitialExec);
    OperandFlags = X86II::MO_GOTTPOFF;
    WrapperKind = X86ISD::WrapperRIP;
  } else {
    assert(model == TLSModel::InitialExec);
    OperandFlags = X86II::MO_INDNTPOFF;
  }

  // Emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax"
  // (initial exec).
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec)
    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, 0);

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  if (Subtarget->isTargetELF()) {
    // TODO: implement the "local dynamic" model
    // TODO: implement the "initial exec" model for PIC executables

    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);

    TLSModel::Model model
      = getTLSModel(GV, getTargetMachine().getRelocationModel());

    switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic: // not implemented
      if (Subtarget->is64Bit())
        return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
      return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());

    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                                 Subtarget->is64Bit());
    }
  } else if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    DebugLoc DL = Op.getDebugLoc();
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       DebugLoc(), getPointerTy()),
                           Offset);

    // Lowering the machine ISD will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);
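    // (A note on the mechanism, for orientation: each Darwin TLS variable is
    // reached through a TLV descriptor whose first field is a getter
    // function, so TLSCALL ultimately expands to an indirect call through
    // that field, typically landing in _tlv_get_addr.)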
    // TLSCALL will be codegen'd as a call. Inform MFI that this function
    // has calls.
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
  }

  llvm_unreachable("TLS not implemented for this target.");
  return SDValue();
}


/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
SDValue X86TargetLowering::LowerShiftParts(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, MVT::i8))
                       : DAG.getConstant(0, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
  }

  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, MVT::i8));
  SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                             AndNode, DAG.getConstant(0, MVT::i8));

  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
  SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };

  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4);
  }

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
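
// A rough sketch of what LowerShiftParts yields for a 64-bit SHL on 32-bit
// x86, with the shift amount in %cl (assuming lo in %eax, hi in %edx, and a
// pre-zeroed scratch register %esi, since cmov takes no immediate):
//   shldl %cl, %eax, %edx    ; hi = hi << amt | lo >> (32 - amt)
//   shll  %cl, %eax          ; lo = lo << amt
//   testb $32, %cl           ; amount >= 32?
//   cmovnel %eax, %edx       ; then hi = lo << (amt & 31)
//   cmovnel %esi, %eax       ; and  lo = 0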
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  EVT SrcVT = Op.getOperand(0).getValueType();

  if (SrcVT.isVector())
    return SDValue();

  assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  DebugLoc dl = Op.getDebugLoc();
  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               MachinePointerInfo::getFixedStack(SSFI),
                               false, false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  // Build the FILD
  DebugLoc DL = Op.getDebugLoc();
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits()/8;

  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO =
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOLoad, ByteSize, ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
  SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                                    X86ISD::FILD, DL,
                                           Tys, Ops, array_lengthof(Ops),
                                           SrcVT, MMO);

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
    SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
    MachineMemOperand *MMO =
      DAG.getMachineFunction()
      .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, SSFISize, SSFISize);

    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
                                    Ops, array_lengthof(Ops),
                                    Op.getValueType(), MMO);
    Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, 0);
  }

  return Result;
}

// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
                                               SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here it is in C code, more or less:
  /*
    double uint64_to_double( uint32_t hi, uint32_t lo ) {
      static const __m128i exp  = { 0x4330000045300000ULL, 0 };
      static const __m128d bias = { 0x1.0p84, 0x1.0p52 };

      // Copy ints to xmm registers.
      __m128i xh = _mm_cvtsi32_si128( hi );
      __m128i xl = _mm_cvtsi32_si128( lo );

      // Combine into low half of a single xmm register.
      __m128i x = _mm_unpacklo_epi32( xh, xl );
      __m128d d;
      double sd;

      // Merge in appropriate exponents to give the integer bits the right
      // magnitude.
      x = _mm_unpacklo_epi32( x, exp );

      // Subtract away the biases to deal with the IEEE-754 double precision
      // implicit 1.
      d = _mm_sub_pd( (__m128d) x, bias );

      // All conversions up to here are exact. The correctly rounded result is
      // calculated using the current rounding mode using the following
      // horizontal add.
      d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) );
      _mm_store_sd( &sd, d );   // Because we are returning doubles in XMM,
                                // this store doesn't really need to be here
                                // (except maybe to zero the other double).
      return sd;
    }
  */

  DebugLoc dl = Op.getDebugLoc();
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  std::vector<Constant*> CV0;
  CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000)));
  CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000)));
  CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
  CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
  Constant *C0 = ConstantVector::get(CV0);
  SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);

  std::vector<Constant*> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);

  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                        Op.getOperand(0),
                                        DAG.getIntPtrConstant(1)));
  SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                        Op.getOperand(0),
                                        DAG.getIntPtrConstant(0)));
  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                              MachinePointerInfo::getConstantPool(),
                              false, false, 16);
  SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
  SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                              MachinePointerInfo::getConstantPool(),
                              false, false, 16);
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);

  // Add the halves; easiest way is to swap them into another reg first.
  int ShufMask[2] = { 1, -1 };
  SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub,
                                      DAG.getUNDEF(MVT::v2f64), ShufMask);
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add,
                     DAG.getIntPtrConstant(0));
}

// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
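  // The trick below, in C-like pseudocode (a sketch):
  //   uint64_t bits = 0x4330000000000000ULL | (uint64_t)x;  // 2^52 + x, exact
  //   return bit_cast<double>(bits) - 0x1.0p52;             // remove the bias
  // 0x4330000000000000 is the bit pattern of 2^52; OR'ing the 32-bit value
  // into the low mantissa bits forms 2^52 + x with no rounding, so the
  // subtraction recovers x exactly.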
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Load)),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Bias)));
  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
                   DAG.getIntPtrConstant(0));

  // Subtract the bias.
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64)) {
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  } else if (DestVT.bitsGT(MVT::f64)) {
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
  }

  return Sub;
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  EVT SrcVT = N0.getValueType();
  EVT DstVT = Op.getValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                          MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL); // The bit pattern of 2^64 as an IEEE-754 float.

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
                             getPointerTy());

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0);
  SDValue Four = DAG.getIntPtrConstant(4);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
                                 FudgePtr, MachinePointerInfo::getConstantPool(),
                                 MVT::f32, false, false, 4);
  // Extend everything to 80 bits to force it to be done on x87.
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}

std::pair<SDValue,SDValue> X86TargetLowering::
FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
  DebugLoc DL = Op.getDebugLoc();

  EVT DstTy = Op.getValueType();

  if (!IsSigned) {
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget->is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
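  // A sketch of the typical sequence for an f64 source (assuming the value
  // starts in an SSE register):
  //   movsd  %xmm0, (slot1)   ; spill, since FIST reads the x87 stack
  //   fldl   (slot1)
  //   fistpll (slot2)         ; FP_TO_INT64_IN_MEM
  // after which the caller loads the integer result from slot2.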
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getSizeInBits()/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  switch (DstTy.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  EVT TheVT = Op.getOperand(0).getValueType();
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, 0);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };

    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                              MachineMemOperand::MOLoad, MemSize, MemSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3,
                                    DstTy, MMO);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                            MachineMemOperand::MOStore, MemSize, MemSize);

  // Build the FP_TO_INT*_IN_MEM
  SDValue Ops[] = { Chain, Value, StackSlot };
  SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                         Ops, 3, DstTy, MMO);

  return std::make_pair(FIST, StackSlot);
}

SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return SDValue();

  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (FIST.getNode() == 0) return Op;

  // Load the result.
  return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
                     FIST, StackSlot, MachinePointerInfo(), false, false, 0);
}

SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  assert(FIST.getNode() && "Unexpected failure");
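  // The unsigned i32 result was produced by FP_TO_INTHelper as a signed
  // FIST into an 8-byte slot; loading just the low 32 bits below recovers
  // the correct unsigned value for any input that fits in a u32.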
  // Load the result.
  return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
                     FIST, StackSlot, MachinePointerInfo(), false, false, 0);
}

SDValue X86TargetLowering::LowerFABS(SDValue Op,
                                     SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT EltVT = VT;
  if (VT.isVector())
    EltVT = VT.getVectorElementType();
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                             MachinePointerInfo::getConstantPool(),
                             false, false, 16);
  return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}

SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT EltVT = VT;
  if (VT.isVector())
    EltVT = VT.getVectorElementType();
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                             MachinePointerInfo::getConstantPool(),
                             false, false, 16);
  if (VT.isVector()) {
    return DAG.getNode(ISD::BITCAST, dl, VT,
                       DAG.getNode(ISD::XOR, dl, MVT::v2i64,
                                   DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                               Op.getOperand(0)),
                                   DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                               Mask)));
  } else {
    return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
  }
}

SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext *Context = DAG.getContext();
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT SrcVT = Op1.getValueType();

  // If the second operand is smaller, extend it first.
  if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  // First get the sign bit of the second operand.
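  // (The masks built here and below are 16-byte constant-pool vectors so they
  // feed ANDPS/ANDPD/ORPS directly: copysign(x, y) is computed as
  // (x & ~signmask) | (y & signmask).)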
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, 16);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (SrcVT.bitsGT(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear the first operand's sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
  SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, 16);
  SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}

SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
  SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
                                  DAG.getConstant(1, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
                                    SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
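  // (A/AE/B/BE depend on CF; G/GE/L/LE and O/NO depend on OF. TEST forces
  // both CF and OF to zero, so when either flag matters we must emit a real
  // compare below rather than reuse an arithmetic node's EFLAGS.)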
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO:
    NeedOF = true;
    break;
  }

  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  unsigned Opcode = 0;
  unsigned NumOperands = 0;
  switch (Op.getNode()->getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::AND: {
    // If the primary result of the 'and' isn't used, don't bother using
    // X86ISD::AND, because a TEST instruction will be better.
    bool NonFlagUse = false;
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;
      unsigned UOpNo = UI.getOperandNo();
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
        // Look past the truncate.
        UOpNo = User->use_begin().getOperandNo();
        User = *User->use_begin();
      }

      if (User->getOpcode() != ISD::BRCOND &&
          User->getOpcode() != ISD::SETCC &&
          (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
        NonFlagUse = true;
        break;
      }
    }

    if (!NonFlagUse)
      break;
  }
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (Op.getNode()->getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0; i != NumOperands; ++i)
    Ops.push_back(Op.getOperand(i));

  SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
  DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}

/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                                   SelectionDAG &DAG) const {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
    if (C->getAPIntValue() == 0)
      return EmitTest(Op0, X86CC, DAG);

  DebugLoc dl = Op0.getDebugLoc();
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}

/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT
/// node if possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
                                     DebugLoc dl, SelectionDAG &DAG) const {
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue LHS, RHS;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
      if (And00C->getZExtValue() == 1) {
        // If we looked past a truncate, check that it's only truncating away
        // known zeros.
        unsigned BitWidth = Op0.getValueSizeInBits();
        unsigned AndBitWidth = And.getValueSizeInBits();
        if (BitWidth > AndBitWidth) {
          APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
          DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
          if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
            return SDValue();
        }
        LHS = Op1;
        RHS = Op0.getOperand(1);
      }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    SDValue AndLHS = Op0;
    if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) {
      LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }
  }

  if (LHS.getNode()) {
    // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
    // instruction. Since the shift amount is in-range-or-undefined, we know
    // that doing a bittest on the i32 value is ok.
    // We extend to i32 because the encoding for the i16 version is larger
    // than the i32 version. Also promote i16 to i32 for performance / code
    // size reasons.
    if (LHS.getValueType() == MVT::i8 ||
        LHS.getValueType() == MVT::i16)
      LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);

    // If the operand types disagree, extend the shift amount to match. Since
    // BT ignores high bits (like shifts) we can use anyextend.
    if (LHS.getValueType() != RHS.getValueType())
      RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);

    SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
    unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, MVT::i8), BT);
  }

  return SDValue();
}

SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
      Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op1)->isNullValue() &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode())
      return NewSetCC;
  }

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms
  // of these.
  if (Op1.getOpcode() == ISD::Constant &&
      (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
       cast<ConstantSDNode>(Op1)->isNullValue()) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one
    // with the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^
                    cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert) return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                         DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
    }
  }

  bool isFP = Op1.getValueType().isFloatingPoint();
  unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG);
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(X86CC, MVT::i8), EFLAGS);
}
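
// (Context for the split below: AVX provides 256-bit floating-point compares,
// but 256-bit integer PCMP* instructions only arrive with AVX2, so an integer
// VSETCC on a YMM type must be performed as two XMM halves.)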
// Lower256IntVETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
static SDValue Lower256IntVETCC(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  assert(VT.getSizeInBits() == 256 && Op.getOpcode() == ISD::VSETCC &&
         "Unsupported value type for operation");

  int NumElems = VT.getVectorNumElements();
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC = Op.getOperand(2);
  SDValue Idx0 = DAG.getConstant(0, MVT::i32);
  SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl);
  SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl);
  SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back
  MVT EltVT = VT.getVectorElementType().getSimpleVT();
  EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}


SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond;
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
  DebugLoc dl = Op.getDebugLoc();

  if (isFP) {
    unsigned SSECC = 8;
    EVT EltVT = Op0.getValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);

    unsigned Opc = EltVT == MVT::f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
    bool Swap = false;

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETOEQ:
    case ISD::SETEQ:  SSECC = 0; break;
    case ISD::SETOGT:
    case ISD::SETGT:  Swap = true; // Fallthrough
    case ISD::SETLT:
    case ISD::SETOLT: SSECC = 1; break;
    case ISD::SETOGE:
    case ISD::SETGE:  Swap = true; // Fallthrough
    case ISD::SETLE:
    case ISD::SETOLE: SSECC = 2; break;
    case ISD::SETUO:  SSECC = 3; break;
    case ISD::SETUNE:
    case ISD::SETNE:  SSECC = 4; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: SSECC = 5; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: SSECC = 6; break;
    case ISD::SETO:   SSECC = 7; break;
    }
    if (Swap)
      std::swap(Op0, Op1);

    // In the two special cases we can't handle, emit two comparisons.
    if (SSECC == 8) {
      if (SetCCOpcode == ISD::SETUEQ) {
        SDValue UNORD, EQ;
        UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
        EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
        return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
      } else if (SetCCOpcode == ISD::SETONE) {
        SDValue ORD, NEQ;
        ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
        NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
        return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
      }
      llvm_unreachable("Illegal FP comparison");
    }
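    // (The cmpps/cmppd immediate used above and below encodes the predicate:
    // 0=EQ, 1=LT, 2=LE, 3=UNORD, 4=NEQ, 5=NLT, 6=NLE, 7=ORD. SETUEQ and
    // SETONE have no single encoding, hence the two-instruction expansions.)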
    // Handle all other FP comparisons here.
    return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
  }

  // Break 256-bit integer vector compare into smaller ones.
  if (!isFP && VT.getSizeInBits() == 256)
    return Lower256IntVETCC(Op, DAG);

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
  bool Swap = false, Invert = false, FlipSigns = false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
  case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
  case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
  case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETNE:  Invert = true;
  case ISD::SETEQ:  Opc = EQOpc; break;
  case ISD::SETLT:  Swap = true;
  case ISD::SETGT:  Opc = GTOpc; break;
  case ISD::SETGE:  Swap = true;
  case ISD::SETLE:  Opc = GTOpc; Invert = true; break;
  case ISD::SETULT: Swap = true;
  case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
  case ISD::SETUGE: Swap = true;
  case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    EVT EltVT = VT.getVectorElementType();
    SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
                                      EltVT);
    std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
    SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0],
                                  SignBits.size());
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
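
// A note on the FlipSigns step in LowerVSETCC: SSE only has signed PCMPGT,
// and an unsigned compare reduces to a signed one by toggling the sign bit
// of both operands, e.g. for 32-bit elements:
//   a <u b   <=>   (a ^ 0x80000000) <s (b ^ 0x80000000)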
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getNode()->getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD ||
       Opc == X86ISD::SUB ||
       Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB ||
       Opc == X86ISD::SMUL ||
       Opc == X86ISD::UMUL ||
       Opc == X86ISD::INC ||
       Opc == X86ISD::DEC ||
       Opc == X86ISD::OR ||
       Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}

static bool isZero(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  return C && C->isNullValue();
}

static bool isAllOnes(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  return C && C->isAllOnesValue();
}

SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  DebugLoc DL = Op.getDebugLoc();
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }

  // (select (x == 0), -1,  y) -> (sign_bit (x - 1)) | y
  // (select (x == 0),  y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0),  y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1,  y) -> ~(sign_bit (x - 1)) | y
  if (Cond.getOpcode() == X86ISD::SETCC &&
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isZero(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);

    unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnes(Op2) ? Op1 : Op2;

      SDValue CmpOp0 = Cmp.getOperand(0);
      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                        CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));

      SDValue Res =   // Res = 0 or -1.
        DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                    DAG.getConstant(X86::COND_B, MVT::i8), Cmp);

      if (isAllOnes(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      if (N2C == 0 || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC ||
      Cond.getOpcode() == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    EVT VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    // Look past the truncate.
    if (Cond.getOpcode() == ISD::TRUNCATE)
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == X86ISD::CMP) {
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
      SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, MVT::i8), Cond);
      if (isAllOnes(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops));
}

// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
// from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}

// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
// 1 and that the SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}

SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD ||
           Cond.getOpcode() == X86ISD::SUB ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
    if (C && C->getAPIntValue() == 1)
      Cond = Cond.getOperand(0);
  }

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC ||
      Cond.getOpcode() == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  } else {
    unsigned CondOpc;
    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
      SDValue Cmp = Cond.getOperand(0).getOperand(1);
      if (CondOpc == ISD::OR) {
        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp)) {
          CC = Cond.getOperand(0).getOperand(0);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                              Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
      } else { // ISD::AND
        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
        // two branches instead of an explicit AND instruction with a
        // separate test. However, we only do this if this block doesn't
        // have a fall-through edge, because this requires an explicit
        // jmp when the condition is false.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
            isX86LogicalCmp(Cmp) &&
            Op.getNode()->hasOneUse()) {
          X86::CondCode CCode =
            (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
          CCode = X86::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, MVT::i8);
          SDNode *User = *Op.getNode()->use_begin();
          // Look for an unconditional branch following this conditional
          // branch. We need this because we need to reverse the successors
          // in order to implement FCMP_OEQ.
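          // (Roughly: the OEQ test "ZF set and PF clear" becomes
          // "jne false; jp false", with the unconditional branch retargeted
          // at the true block.)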
          if (User->getOpcode() == ISD::BR) {
            SDValue FalseBB = User->getOperand(1);
            SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
            assert(NewBR == User);
            (void)NewBR;
            Dest = FalseBB;

            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                                Chain, Dest, CC, Cmp);
            X86::CondCode CCode =
              (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
            CCode = X86::GetOppositeBranchCondition(CCode);
            CC = DAG.getConstant(CCode, MVT::i8);
            Cond = Cmp;
            addTest = false;
          }
        }
      }
    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
      // Recognize the xorb (setcc), 1 pattern; the xor inverts the condition.
      // It should be transformed during dag combiner except when the
      // condition is set by an arithmetic-with-overflow node.
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, MVT::i8);
      Cond = Cond.getOperand(0).getOperand(1);
      addTest = false;
    }
  }

  if (addTest) {
    // Look past the truncate.
    if (Cond.getOpcode() == ISD::TRUNCATE)
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DAG);
  }
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}


// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) &&
         "This should be used only on Windows targets");
  assert(!Subtarget->isTargetEnvMacho());
  DebugLoc dl = Op.getDebugLoc();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDValue Flag;

  EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
  unsigned Reg = (Subtarget->is64Bit() ?
                  X86::RAX : X86::EAX);

  Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
  Flag = Chain.getValue(1);

  Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1);

  SDValue Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getMergeValues(Ops1, 2, dl);
}

SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  DebugLoc DL = Op.getDebugLoc();

  if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
  SmallVector<SDValue, 8> MemOps;
  SDValue FIN = Op.getOperand(1);
  // Store gp_offset
  SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
                               FIN, MachinePointerInfo(SV), false, false, 0);
  MemOps.push_back(Store);

  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                       MVT::i32),
                       FIN, MachinePointerInfo(SV, 4), false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to overflow_arg_area
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                    FIN, DAG.getIntPtrConstant(4));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
                       MachinePointerInfo(SV, 8),
                       false, false, 0);
  MemOps.push_back(Store);

  // Store ptr to reg_save_area.
8879 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 8880 FIN, DAG.getIntPtrConstant(8)); 8881 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 8882 getPointerTy()); 8883 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 8884 MachinePointerInfo(SV, 16), false, false, 0); 8885 MemOps.push_back(Store); 8886 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 8887 &MemOps[0], MemOps.size()); 8888} 8889 8890SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 8891 assert(Subtarget->is64Bit() && 8892 "LowerVAARG only handles 64-bit va_arg!"); 8893 assert((Subtarget->isTargetLinux() || 8894 Subtarget->isTargetDarwin()) && 8895 "Unhandled target in LowerVAARG"); 8896 assert(Op.getNode()->getNumOperands() == 4); 8897 SDValue Chain = Op.getOperand(0); 8898 SDValue SrcPtr = Op.getOperand(1); 8899 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 8900 unsigned Align = Op.getConstantOperandVal(3); 8901 DebugLoc dl = Op.getDebugLoc(); 8902 8903 EVT ArgVT = Op.getNode()->getValueType(0); 8904 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 8905 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 8906 uint8_t ArgMode; 8907 8908 // Decide which area this value should be read from. 8909 // TODO: Implement the AMD64 ABI in its entirety. This simple 8910 // selection mechanism works only for the basic types. 8911 if (ArgVT == MVT::f80) { 8912 llvm_unreachable("va_arg for f80 not yet implemented"); 8913 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 8914 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 8915 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 8916 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 8917 } else { 8918 llvm_unreachable("Unhandled argument type in LowerVAARG"); 8919 } 8920 8921 if (ArgMode == 2) { 8922 // Sanity Check: Make sure using fp_offset makes sense. 8923 assert(!UseSoftFloat && 8924 !(DAG.getMachineFunction() 8925 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 8926 Subtarget->hasXMM()); 8927 } 8928 8929 // Insert VAARG_64 node into the DAG 8930 // VAARG_64 returns two values: Variable Argument Address, Chain 8931 SmallVector<SDValue, 11> InstOps; 8932 InstOps.push_back(Chain); 8933 InstOps.push_back(SrcPtr); 8934 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 8935 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 8936 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 8937 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 8938 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 8939 VTs, &InstOps[0], InstOps.size(), 8940 MVT::i64, 8941 MachinePointerInfo(SV), 8942 /*Align=*/0, 8943 /*Volatile=*/false, 8944 /*ReadMem=*/true, 8945 /*WriteMem=*/true); 8946 Chain = VAARG.getValue(1); 8947 8948 // Load the next argument and return it 8949 return DAG.getLoad(ArgVT, dl, 8950 Chain, 8951 VAARG, 8952 MachinePointerInfo(), 8953 false, false, 0); 8954} 8955 8956SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 8957 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
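// That is 4 + 4 + 8 + 8 = 24 bytes (the two pointers are 8-byte aligned, so
// there is no tail padding), which is why the memcpy below copies exactly
// 24 bytes at 8-byte alignment.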
8958 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 8959 SDValue Chain = Op.getOperand(0); 8960 SDValue DstPtr = Op.getOperand(1); 8961 SDValue SrcPtr = Op.getOperand(2); 8962 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 8963 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 8964 DebugLoc DL = Op.getDebugLoc(); 8965 8966 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 8967 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 8968 false, 8969 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 8970} 8971 8972SDValue 8973X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 8974 DebugLoc dl = Op.getDebugLoc(); 8975 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8976 switch (IntNo) { 8977 default: return SDValue(); // Don't custom lower most intrinsics. 8978 // Comparison intrinsics. 8979 case Intrinsic::x86_sse_comieq_ss: 8980 case Intrinsic::x86_sse_comilt_ss: 8981 case Intrinsic::x86_sse_comile_ss: 8982 case Intrinsic::x86_sse_comigt_ss: 8983 case Intrinsic::x86_sse_comige_ss: 8984 case Intrinsic::x86_sse_comineq_ss: 8985 case Intrinsic::x86_sse_ucomieq_ss: 8986 case Intrinsic::x86_sse_ucomilt_ss: 8987 case Intrinsic::x86_sse_ucomile_ss: 8988 case Intrinsic::x86_sse_ucomigt_ss: 8989 case Intrinsic::x86_sse_ucomige_ss: 8990 case Intrinsic::x86_sse_ucomineq_ss: 8991 case Intrinsic::x86_sse2_comieq_sd: 8992 case Intrinsic::x86_sse2_comilt_sd: 8993 case Intrinsic::x86_sse2_comile_sd: 8994 case Intrinsic::x86_sse2_comigt_sd: 8995 case Intrinsic::x86_sse2_comige_sd: 8996 case Intrinsic::x86_sse2_comineq_sd: 8997 case Intrinsic::x86_sse2_ucomieq_sd: 8998 case Intrinsic::x86_sse2_ucomilt_sd: 8999 case Intrinsic::x86_sse2_ucomile_sd: 9000 case Intrinsic::x86_sse2_ucomigt_sd: 9001 case Intrinsic::x86_sse2_ucomige_sd: 9002 case Intrinsic::x86_sse2_ucomineq_sd: { 9003 unsigned Opc = 0; 9004 ISD::CondCode CC = ISD::SETCC_INVALID; 9005 switch (IntNo) { 9006 default: break; 9007 case Intrinsic::x86_sse_comieq_ss: 9008 case Intrinsic::x86_sse2_comieq_sd: 9009 Opc = X86ISD::COMI; 9010 CC = ISD::SETEQ; 9011 break; 9012 case Intrinsic::x86_sse_comilt_ss: 9013 case Intrinsic::x86_sse2_comilt_sd: 9014 Opc = X86ISD::COMI; 9015 CC = ISD::SETLT; 9016 break; 9017 case Intrinsic::x86_sse_comile_ss: 9018 case Intrinsic::x86_sse2_comile_sd: 9019 Opc = X86ISD::COMI; 9020 CC = ISD::SETLE; 9021 break; 9022 case Intrinsic::x86_sse_comigt_ss: 9023 case Intrinsic::x86_sse2_comigt_sd: 9024 Opc = X86ISD::COMI; 9025 CC = ISD::SETGT; 9026 break; 9027 case Intrinsic::x86_sse_comige_ss: 9028 case Intrinsic::x86_sse2_comige_sd: 9029 Opc = X86ISD::COMI; 9030 CC = ISD::SETGE; 9031 break; 9032 case Intrinsic::x86_sse_comineq_ss: 9033 case Intrinsic::x86_sse2_comineq_sd: 9034 Opc = X86ISD::COMI; 9035 CC = ISD::SETNE; 9036 break; 9037 case Intrinsic::x86_sse_ucomieq_ss: 9038 case Intrinsic::x86_sse2_ucomieq_sd: 9039 Opc = X86ISD::UCOMI; 9040 CC = ISD::SETEQ; 9041 break; 9042 case Intrinsic::x86_sse_ucomilt_ss: 9043 case Intrinsic::x86_sse2_ucomilt_sd: 9044 Opc = X86ISD::UCOMI; 9045 CC = ISD::SETLT; 9046 break; 9047 case Intrinsic::x86_sse_ucomile_ss: 9048 case Intrinsic::x86_sse2_ucomile_sd: 9049 Opc = X86ISD::UCOMI; 9050 CC = ISD::SETLE; 9051 break; 9052 case Intrinsic::x86_sse_ucomigt_ss: 9053 case Intrinsic::x86_sse2_ucomigt_sd: 9054 Opc = X86ISD::UCOMI; 9055 CC = ISD::SETGT; 9056 break; 9057 case Intrinsic::x86_sse_ucomige_ss: 9058 case Intrinsic::x86_sse2_ucomige_sd: 9059 Opc = X86ISD::UCOMI; 9060 
CC = ISD::SETGE; 9061 break; 9062 case Intrinsic::x86_sse_ucomineq_ss: 9063 case Intrinsic::x86_sse2_ucomineq_sd: 9064 Opc = X86ISD::UCOMI; 9065 CC = ISD::SETNE; 9066 break; 9067 } 9068 9069 SDValue LHS = Op.getOperand(1); 9070 SDValue RHS = Op.getOperand(2); 9071 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 9072 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 9073 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 9074 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9075 DAG.getConstant(X86CC, MVT::i8), Cond); 9076 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9077 } 9078 // ptest and testp intrinsics. The intrinsics these come from are designed to 9079 // return an integer value rather than just setting flags, so lower them to 9080 // the ptest or testp pattern plus a setcc for the result. 9081 case Intrinsic::x86_sse41_ptestz: 9082 case Intrinsic::x86_sse41_ptestc: 9083 case Intrinsic::x86_sse41_ptestnzc: 9084 case Intrinsic::x86_avx_ptestz_256: 9085 case Intrinsic::x86_avx_ptestc_256: 9086 case Intrinsic::x86_avx_ptestnzc_256: 9087 case Intrinsic::x86_avx_vtestz_ps: 9088 case Intrinsic::x86_avx_vtestc_ps: 9089 case Intrinsic::x86_avx_vtestnzc_ps: 9090 case Intrinsic::x86_avx_vtestz_pd: 9091 case Intrinsic::x86_avx_vtestc_pd: 9092 case Intrinsic::x86_avx_vtestnzc_pd: 9093 case Intrinsic::x86_avx_vtestz_ps_256: 9094 case Intrinsic::x86_avx_vtestc_ps_256: 9095 case Intrinsic::x86_avx_vtestnzc_ps_256: 9096 case Intrinsic::x86_avx_vtestz_pd_256: 9097 case Intrinsic::x86_avx_vtestc_pd_256: 9098 case Intrinsic::x86_avx_vtestnzc_pd_256: { 9099 bool IsTestPacked = false; 9100 unsigned X86CC = 0; 9101 switch (IntNo) { 9102 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 9103 case Intrinsic::x86_avx_vtestz_ps: 9104 case Intrinsic::x86_avx_vtestz_pd: 9105 case Intrinsic::x86_avx_vtestz_ps_256: 9106 case Intrinsic::x86_avx_vtestz_pd_256: 9107 IsTestPacked = true; // Fallthrough 9108 case Intrinsic::x86_sse41_ptestz: 9109 case Intrinsic::x86_avx_ptestz_256: 9110 // ZF = 1 9111 X86CC = X86::COND_E; 9112 break; 9113 case Intrinsic::x86_avx_vtestc_ps: 9114 case Intrinsic::x86_avx_vtestc_pd: 9115 case Intrinsic::x86_avx_vtestc_ps_256: 9116 case Intrinsic::x86_avx_vtestc_pd_256: 9117 IsTestPacked = true; // Fallthrough 9118 case Intrinsic::x86_sse41_ptestc: 9119 case Intrinsic::x86_avx_ptestc_256: 9120 // CF = 1 9121 X86CC = X86::COND_B; 9122 break; 9123 case Intrinsic::x86_avx_vtestnzc_ps: 9124 case Intrinsic::x86_avx_vtestnzc_pd: 9125 case Intrinsic::x86_avx_vtestnzc_ps_256: 9126 case Intrinsic::x86_avx_vtestnzc_pd_256: 9127 IsTestPacked = true; // Fallthrough 9128 case Intrinsic::x86_sse41_ptestnzc: 9129 case Intrinsic::x86_avx_ptestnzc_256: 9130 // ZF and CF = 0 9131 X86CC = X86::COND_A; 9132 break; 9133 } 9134 9135 SDValue LHS = Op.getOperand(1); 9136 SDValue RHS = Op.getOperand(2); 9137 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 9138 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 9139 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 9140 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 9141 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9142 } 9143 9144 // Fix vector shift instructions where the last operand is a non-immediate 9145 // i32 value.
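// An illustrative example of the rewrite performed here: a call such as
//   x86_sse2_pslli_w(v, n)   // n not a compile-time constant
// becomes
//   x86_sse2_psll_w(v, bitcast(build_vector(n, 0, undef, undef)))
// because the register forms of these shifts take the amount in an XMM/MMX
// register rather than as an immediate.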
9146 case Intrinsic::x86_sse2_pslli_w: 9147 case Intrinsic::x86_sse2_pslli_d: 9148 case Intrinsic::x86_sse2_pslli_q: 9149 case Intrinsic::x86_sse2_psrli_w: 9150 case Intrinsic::x86_sse2_psrli_d: 9151 case Intrinsic::x86_sse2_psrli_q: 9152 case Intrinsic::x86_sse2_psrai_w: 9153 case Intrinsic::x86_sse2_psrai_d: 9154 case Intrinsic::x86_mmx_pslli_w: 9155 case Intrinsic::x86_mmx_pslli_d: 9156 case Intrinsic::x86_mmx_pslli_q: 9157 case Intrinsic::x86_mmx_psrli_w: 9158 case Intrinsic::x86_mmx_psrli_d: 9159 case Intrinsic::x86_mmx_psrli_q: 9160 case Intrinsic::x86_mmx_psrai_w: 9161 case Intrinsic::x86_mmx_psrai_d: { 9162 SDValue ShAmt = Op.getOperand(2); 9163 if (isa<ConstantSDNode>(ShAmt)) 9164 return SDValue(); 9165 9166 unsigned NewIntNo = 0; 9167 EVT ShAmtVT = MVT::v4i32; 9168 switch (IntNo) { 9169 case Intrinsic::x86_sse2_pslli_w: 9170 NewIntNo = Intrinsic::x86_sse2_psll_w; 9171 break; 9172 case Intrinsic::x86_sse2_pslli_d: 9173 NewIntNo = Intrinsic::x86_sse2_psll_d; 9174 break; 9175 case Intrinsic::x86_sse2_pslli_q: 9176 NewIntNo = Intrinsic::x86_sse2_psll_q; 9177 break; 9178 case Intrinsic::x86_sse2_psrli_w: 9179 NewIntNo = Intrinsic::x86_sse2_psrl_w; 9180 break; 9181 case Intrinsic::x86_sse2_psrli_d: 9182 NewIntNo = Intrinsic::x86_sse2_psrl_d; 9183 break; 9184 case Intrinsic::x86_sse2_psrli_q: 9185 NewIntNo = Intrinsic::x86_sse2_psrl_q; 9186 break; 9187 case Intrinsic::x86_sse2_psrai_w: 9188 NewIntNo = Intrinsic::x86_sse2_psra_w; 9189 break; 9190 case Intrinsic::x86_sse2_psrai_d: 9191 NewIntNo = Intrinsic::x86_sse2_psra_d; 9192 break; 9193 default: { 9194 ShAmtVT = MVT::v2i32; 9195 switch (IntNo) { 9196 case Intrinsic::x86_mmx_pslli_w: 9197 NewIntNo = Intrinsic::x86_mmx_psll_w; 9198 break; 9199 case Intrinsic::x86_mmx_pslli_d: 9200 NewIntNo = Intrinsic::x86_mmx_psll_d; 9201 break; 9202 case Intrinsic::x86_mmx_pslli_q: 9203 NewIntNo = Intrinsic::x86_mmx_psll_q; 9204 break; 9205 case Intrinsic::x86_mmx_psrli_w: 9206 NewIntNo = Intrinsic::x86_mmx_psrl_w; 9207 break; 9208 case Intrinsic::x86_mmx_psrli_d: 9209 NewIntNo = Intrinsic::x86_mmx_psrl_d; 9210 break; 9211 case Intrinsic::x86_mmx_psrli_q: 9212 NewIntNo = Intrinsic::x86_mmx_psrl_q; 9213 break; 9214 case Intrinsic::x86_mmx_psrai_w: 9215 NewIntNo = Intrinsic::x86_mmx_psra_w; 9216 break; 9217 case Intrinsic::x86_mmx_psrai_d: 9218 NewIntNo = Intrinsic::x86_mmx_psra_d; 9219 break; 9220 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 9221 } 9222 break; 9223 } 9224 } 9225 9226 // The vector shift intrinsics with scalars use 32-bit shift amounts but 9227 // the sse2/mmx shift instructions read 64 bits. Set the upper 32 bits 9228 // to be zero. 9229 SDValue ShOps[4]; 9230 ShOps[0] = ShAmt; 9231 ShOps[1] = DAG.getConstant(0, MVT::i32); 9232 if (ShAmtVT == MVT::v4i32) { 9233 ShOps[2] = DAG.getUNDEF(MVT::i32); 9234 ShOps[3] = DAG.getUNDEF(MVT::i32); 9235 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4); 9236 } else { 9237 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2); 9238// FIXME this must be lowered to get rid of the invalid type.
9239 } 9240 9241 EVT VT = Op.getValueType(); 9242 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); 9243 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9244 DAG.getConstant(NewIntNo, MVT::i32), 9245 Op.getOperand(1), ShAmt); 9246 } 9247 } 9248} 9249 9250SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 9251 SelectionDAG &DAG) const { 9252 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9253 MFI->setReturnAddressIsTaken(true); 9254 9255 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9256 DebugLoc dl = Op.getDebugLoc(); 9257 9258 if (Depth > 0) { 9259 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 9260 SDValue Offset = 9261 DAG.getConstant(TD->getPointerSize(), 9262 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 9263 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9264 DAG.getNode(ISD::ADD, dl, getPointerTy(), 9265 FrameAddr, Offset), 9266 MachinePointerInfo(), false, false, 0); 9267 } 9268 9269 // Just load the return address. 9270 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 9271 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9272 RetAddrFI, MachinePointerInfo(), false, false, 0); 9273} 9274 9275SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 9276 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9277 MFI->setFrameAddressIsTaken(true); 9278 9279 EVT VT = Op.getValueType(); 9280 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 9281 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9282 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 9283 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 9284 while (Depth--) 9285 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 9286 MachinePointerInfo(), 9287 false, false, 0); 9288 return FrameAddr; 9289} 9290 9291SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 9292 SelectionDAG &DAG) const { 9293 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 9294} 9295 9296SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 9297 MachineFunction &MF = DAG.getMachineFunction(); 9298 SDValue Chain = Op.getOperand(0); 9299 SDValue Offset = Op.getOperand(1); 9300 SDValue Handler = Op.getOperand(2); 9301 DebugLoc dl = Op.getDebugLoc(); 9302 9303 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 9304 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 9305 getPointerTy()); 9306 unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); 9307 9308 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 9309 DAG.getIntPtrConstant(TD->getPointerSize())); 9310 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 9311 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 9312 false, false, 0); 9313 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 9314 MF.getRegInfo().addLiveOut(StoreAddrReg); 9315 9316 return DAG.getNode(X86ISD::EH_RETURN, dl, 9317 MVT::Other, 9318 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 9319} 9320 9321SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op, 9322 SelectionDAG &DAG) const { 9323 SDValue Root = Op.getOperand(0); 9324 SDValue Trmp = Op.getOperand(1); // trampoline 9325 SDValue FPtr = Op.getOperand(2); // nested function 9326 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 9327 DebugLoc dl = Op.getDebugLoc(); 9328 9329 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9330 9331 if (Subtarget->is64Bit()) { 9332 SDValue OutChains[6]; 9333 9334 // Large code-model. 9335 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 9336 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 9337 9338 const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10); 9339 const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11); 9340 9341 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 9342 9343 // Load the pointer to the nested function into R11. 9344 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 9345 SDValue Addr = Trmp; 9346 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9347 Addr, MachinePointerInfo(TrmpAddr), 9348 false, false, 0); 9349 9350 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9351 DAG.getConstant(2, MVT::i64)); 9352 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 9353 MachinePointerInfo(TrmpAddr, 2), 9354 false, false, 2); 9355 9356 // Load the 'nest' parameter value into R10. 9357 // R10 is specified in X86CallingConv.td 9358 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 9359 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9360 DAG.getConstant(10, MVT::i64)); 9361 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9362 Addr, MachinePointerInfo(TrmpAddr, 10), 9363 false, false, 0); 9364 9365 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9366 DAG.getConstant(12, MVT::i64)); 9367 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 9368 MachinePointerInfo(TrmpAddr, 12), 9369 false, false, 2); 9370 9371 // Jump to the nested function. 9372 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
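// For reference, the six stores (OutChains[0..5]) produce this illustrative
// 24-byte trampoline image; the byte offsets match the store constants
// 0/2/10/12/20/22 used in this block:
//   0:  49 BB <8-byte FPtr>   movabsq $FPtr, %r11
//   10: 49 BA <8-byte Nest>   movabsq $Nest, %r10
//   20: 49 FF E3              jmpq *%r11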
9373 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9374 DAG.getConstant(20, MVT::i64)); 9375 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9376 Addr, MachinePointerInfo(TrmpAddr, 20), 9377 false, false, 0); 9378 9379 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 9380 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9381 DAG.getConstant(22, MVT::i64)); 9382 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 9383 MachinePointerInfo(TrmpAddr, 22), 9384 false, false, 0); 9385 9386 SDValue Ops[] = 9387 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) }; 9388 return DAG.getMergeValues(Ops, 2, dl); 9389 } else { 9390 const Function *Func = 9391 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 9392 CallingConv::ID CC = Func->getCallingConv(); 9393 unsigned NestReg; 9394 9395 switch (CC) { 9396 default: 9397 llvm_unreachable("Unsupported calling convention"); 9398 case CallingConv::C: 9399 case CallingConv::X86_StdCall: { 9400 // Pass 'nest' parameter in ECX. 9401 // Must be kept in sync with X86CallingConv.td 9402 NestReg = X86::ECX; 9403 9404 // Check that ECX wasn't needed by an 'inreg' parameter. 9405 FunctionType *FTy = Func->getFunctionType(); 9406 const AttrListPtr &Attrs = Func->getAttributes(); 9407 9408 if (!Attrs.isEmpty() && !Func->isVarArg()) { 9409 unsigned InRegCount = 0; 9410 unsigned Idx = 1; 9411 9412 for (FunctionType::param_iterator I = FTy->param_begin(), 9413 E = FTy->param_end(); I != E; ++I, ++Idx) 9414 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 9415 // FIXME: should only count parameters that are lowered to integers. 9416 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 9417 9418 if (InRegCount > 2) { 9419 report_fatal_error("Nest register in use - reduce number of inreg" 9420 " parameters!"); 9421 } 9422 } 9423 break; 9424 } 9425 case CallingConv::X86_FastCall: 9426 case CallingConv::X86_ThisCall: 9427 case CallingConv::Fast: 9428 // Pass 'nest' parameter in EAX. 9429 // Must be kept in sync with X86CallingConv.td 9430 NestReg = X86::EAX; 9431 break; 9432 } 9433 9434 SDValue OutChains[4]; 9435 SDValue Addr, Disp; 9436 9437 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9438 DAG.getConstant(10, MVT::i32)); 9439 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 9440 9441 // This is storing the opcode for MOV32ri. 9442 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 9443 const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg); 9444 OutChains[0] = DAG.getStore(Root, dl, 9445 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 9446 Trmp, MachinePointerInfo(TrmpAddr), 9447 false, false, 0); 9448 9449 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9450 DAG.getConstant(1, MVT::i32)); 9451 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 9452 MachinePointerInfo(TrmpAddr, 1), 9453 false, false, 1); 9454 9455 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
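// For reference, the four stores build this illustrative 10-byte i386
// trampoline image:
//   0: B8+reg <4-byte Nest>   movl $Nest, %ecx (or %eax)
//   5: E9 <4-byte Disp>       jmp FPtr
// where Disp = FPtr - (Trmp + 10), i.e. relative to the end of the
// trampoline, as computed by the SUB above.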
9456 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9457 DAG.getConstant(5, MVT::i32)); 9458 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 9459 MachinePointerInfo(TrmpAddr, 5), 9460 false, false, 1); 9461 9462 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9463 DAG.getConstant(6, MVT::i32)); 9464 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 9465 MachinePointerInfo(TrmpAddr, 6), 9466 false, false, 1); 9467 9468 SDValue Ops[] = 9469 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) }; 9470 return DAG.getMergeValues(Ops, 2, dl); 9471 } 9472} 9473 9474SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 9475 SelectionDAG &DAG) const { 9476 /* 9477 The rounding mode is in bits 11:10 of FPSR, and has the following 9478 settings: 9479 00 Round to nearest 9480 01 Round to -inf 9481 10 Round to +inf 9482 11 Round to 0 9483 9484 FLT_ROUNDS, on the other hand, expects the following: 9485 -1 Undefined 9486 0 Round to 0 9487 1 Round to nearest 9488 2 Round to +inf 9489 3 Round to -inf 9490 9491 To perform the conversion, we do: 9492 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 9493 */ 9494 9495 MachineFunction &MF = DAG.getMachineFunction(); 9496 const TargetMachine &TM = MF.getTarget(); 9497 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 9498 unsigned StackAlignment = TFI.getStackAlignment(); 9499 EVT VT = Op.getValueType(); 9500 DebugLoc DL = Op.getDebugLoc(); 9501 9502 // Save FP Control Word to stack slot 9503 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 9504 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 9505 9506 9507 MachineMemOperand *MMO = 9508 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 9509 MachineMemOperand::MOStore, 2, 2); 9510 9511 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 9512 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 9513 DAG.getVTList(MVT::Other), 9514 Ops, 2, MVT::i16, MMO); 9515 9516 // Load FP Control Word from stack slot 9517 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 9518 MachinePointerInfo(), false, false, 0); 9519 9520 // Transform as necessary 9521 SDValue CWD1 = 9522 DAG.getNode(ISD::SRL, DL, MVT::i16, 9523 DAG.getNode(ISD::AND, DL, MVT::i16, 9524 CWD, DAG.getConstant(0x800, MVT::i16)), 9525 DAG.getConstant(11, MVT::i8)); 9526 SDValue CWD2 = 9527 DAG.getNode(ISD::SRL, DL, MVT::i16, 9528 DAG.getNode(ISD::AND, DL, MVT::i16, 9529 CWD, DAG.getConstant(0x400, MVT::i16)), 9530 DAG.getConstant(9, MVT::i8)); 9531 9532 SDValue RetVal = 9533 DAG.getNode(ISD::AND, DL, MVT::i16, 9534 DAG.getNode(ISD::ADD, DL, MVT::i16, 9535 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 9536 DAG.getConstant(1, MVT::i16)), 9537 DAG.getConstant(3, MVT::i16)); 9538 9539 9540 return DAG.getNode((VT.getSizeInBits() < 16 ? 9541 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 9542} 9543 9544SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 9545 EVT VT = Op.getValueType(); 9546 EVT OpVT = VT; 9547 unsigned NumBits = VT.getSizeInBits(); 9548 DebugLoc dl = Op.getDebugLoc(); 9549 9550 Op = Op.getOperand(0); 9551 if (VT == MVT::i8) { 9552 // Zero extend to i32 since there is not an i8 bsr. 9553 OpVT = MVT::i32; 9554 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 9555 } 9556 9557 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 9558 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 9559 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 9560 9561 // If src is zero (i.e. 
bsr sets ZF), returns NumBits. 9562 SDValue Ops[] = { 9563 Op, 9564 DAG.getConstant(NumBits+NumBits-1, OpVT), 9565 DAG.getConstant(X86::COND_E, MVT::i8), 9566 Op.getValue(1) 9567 }; 9568 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 9569 9570 // Finally xor with NumBits-1. 9571 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 9572 9573 if (VT == MVT::i8) 9574 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 9575 return Op; 9576} 9577 9578SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 9579 EVT VT = Op.getValueType(); 9580 EVT OpVT = VT; 9581 unsigned NumBits = VT.getSizeInBits(); 9582 DebugLoc dl = Op.getDebugLoc(); 9583 9584 Op = Op.getOperand(0); 9585 if (VT == MVT::i8) { 9586 OpVT = MVT::i32; 9587 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 9588 } 9589 9590 // Issue a bsf (scan bits forward) which also sets EFLAGS. 9591 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 9592 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 9593 9594 // If src is zero (i.e. bsf sets ZF), returns NumBits. 9595 SDValue Ops[] = { 9596 Op, 9597 DAG.getConstant(NumBits, OpVT), 9598 DAG.getConstant(X86::COND_E, MVT::i8), 9599 Op.getValue(1) 9600 }; 9601 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 9602 9603 if (VT == MVT::i8) 9604 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 9605 return Op; 9606} 9607 9608// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 9609// ones, and then concatenate the result back. 9610static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 9611 EVT VT = Op.getValueType(); 9612 9613 assert(VT.getSizeInBits() == 256 && VT.isInteger() && 9614 "Unsupported value type for operation"); 9615 9616 int NumElems = VT.getVectorNumElements(); 9617 DebugLoc dl = Op.getDebugLoc(); 9618 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 9619 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 9620 9621 // Extract the LHS vectors 9622 SDValue LHS = Op.getOperand(0); 9623 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 9624 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 9625 9626 // Extract the RHS vectors 9627 SDValue RHS = Op.getOperand(1); 9628 SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); 9629 SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); 9630 9631 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9632 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9633 9634 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9635 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 9636 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 9637} 9638 9639SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { 9640 assert(Op.getValueType().getSizeInBits() == 256 && 9641 Op.getValueType().isInteger() && 9642 "Only handle AVX 256-bit vector integer operation"); 9643 return Lower256IntArith(Op, DAG); 9644} 9645 9646SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const { 9647 assert(Op.getValueType().getSizeInBits() == 256 && 9648 Op.getValueType().isInteger() && 9649 "Only handle AVX 256-bit vector integer operation"); 9650 return Lower256IntArith(Op, DAG); 9651} 9652 9653SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9654 EVT VT = Op.getValueType(); 9655 9656 // Decompose 256-bit ops into smaller 128-bit ops. 
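// An illustrative example of the decomposition done by Lower256IntArith:
//   v8i32 mul a, b
//     -> concat_vectors (mul v4i32 a[0..3], b[0..3]),
//                       (mul v4i32 a[4..7], b[4..7])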
9657 if (VT.getSizeInBits() == 256) 9658 return Lower256IntArith(Op, DAG); 9659 9660 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply"); 9661 DebugLoc dl = Op.getDebugLoc(); 9662 9663 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32); 9664 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32); 9665 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b ); 9666 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi ); 9667 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b ); 9668 // 9669 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 ); 9670 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 ); 9671 // return AloBlo + AloBhi + AhiBlo; 9672 9673 SDValue A = Op.getOperand(0); 9674 SDValue B = Op.getOperand(1); 9675 9676 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9677 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 9678 A, DAG.getConstant(32, MVT::i32)); 9679 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9680 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 9681 B, DAG.getConstant(32, MVT::i32)); 9682 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9683 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 9684 A, B); 9685 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9686 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 9687 A, Bhi); 9688 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9689 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 9690 Ahi, B); 9691 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9692 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 9693 AloBhi, DAG.getConstant(32, MVT::i32)); 9694 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9695 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 9696 AhiBlo, DAG.getConstant(32, MVT::i32)); 9697 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 9698 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 9699 return Res; 9700} 9701 9702SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 9703 9704 EVT VT = Op.getValueType(); 9705 DebugLoc dl = Op.getDebugLoc(); 9706 SDValue R = Op.getOperand(0); 9707 SDValue Amt = Op.getOperand(1); 9708 LLVMContext *Context = DAG.getContext(); 9709 9710 if (!(Subtarget->hasSSE2() || Subtarget->hasAVX())) 9711 return SDValue(); 9712 9713 // Decompose 256-bit shifts into smaller 128-bit shifts. 
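// As above, an illustrative example: a v8i32 shl by a v8i32 amount becomes
// two v4i32 shifts (the amount halves are re-built from the constant
// operands or extracted as 128-bit subvectors) whose results are then
// concatenated.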
9714 if (VT.getSizeInBits() == 256) { 9715 int NumElems = VT.getVectorNumElements(); 9716 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9717 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9718 9719 // Extract the two vectors 9720 SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl); 9721 SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32), 9722 DAG, dl); 9723 9724 // Recreate the shift amount vectors 9725 SDValue Amt1, Amt2; 9726 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 9727 // Constant shift amount 9728 SmallVector<SDValue, 4> Amt1Csts; 9729 SmallVector<SDValue, 4> Amt2Csts; 9730 for (int i = 0; i < NumElems/2; ++i) 9731 Amt1Csts.push_back(Amt->getOperand(i)); 9732 for (int i = NumElems/2; i < NumElems; ++i) 9733 Amt2Csts.push_back(Amt->getOperand(i)); 9734 9735 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 9736 &Amt1Csts[0], NumElems/2); 9737 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 9738 &Amt2Csts[0], NumElems/2); 9739 } else { 9740 // Variable shift amount 9741 Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl); 9742 Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32), 9743 DAG, dl); 9744 } 9745 9746 // Issue new vector shifts for the smaller types 9747 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 9748 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 9749 9750 // Concatenate the result back 9751 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 9752 } 9753 9754 // Optimize shl/srl/sra with constant shift amount. 9755 if (isSplatVector(Amt.getNode())) { 9756 SDValue SclrAmt = Amt->getOperand(0); 9757 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 9758 uint64_t ShiftAmt = C->getZExtValue(); 9759 9760 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SHL) 9761 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9762 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 9763 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9764 9765 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SHL) 9766 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9767 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 9768 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9769 9770 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SHL) 9771 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9772 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 9773 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9774 9775 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SRL) 9776 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9777 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 9778 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9779 9780 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRL) 9781 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9782 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 9783 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9784 9785 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRL) 9786 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9787 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 9788 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9789 9790 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRA) 9791 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9792 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 9793 R, DAG.getConstant(ShiftAmt, MVT::i32)); 9794 9795 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRA) 9796 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9797 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 9798 R, 
DAG.getConstant(ShiftAmt, MVT::i32)); 9799 } 9800 } 9801 9802 // Lower SHL with variable shift amount. 9803 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 9804 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9805 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 9806 Op.getOperand(1), DAG.getConstant(23, MVT::i32)); 9807 9808 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); 9809 9810 std::vector<Constant*> CV(4, CI); 9811 Constant *C = ConstantVector::get(CV); 9812 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 9813 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9814 MachinePointerInfo::getConstantPool(), 9815 false, false, 16); 9816 9817 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 9818 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 9819 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 9820 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 9821 } 9822 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 9823 // a = a << 5; 9824 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9825 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 9826 Op.getOperand(1), DAG.getConstant(5, MVT::i32)); 9827 9828 ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15)); 9829 ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63)); 9830 9831 std::vector<Constant*> CVM1(16, CM1); 9832 std::vector<Constant*> CVM2(16, CM2); 9833 Constant *C = ConstantVector::get(CVM1); 9834 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 9835 SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9836 MachinePointerInfo::getConstantPool(), 9837 false, false, 16); 9838 9839 // r = pblendv(r, psllw(r & (char16)15, 4), a); 9840 M = DAG.getNode(ISD::AND, dl, VT, R, M); 9841 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9842 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 9843 DAG.getConstant(4, MVT::i32)); 9844 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op); 9845 // a += a 9846 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 9847 9848 C = ConstantVector::get(CVM2); 9849 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 9850 M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9851 MachinePointerInfo::getConstantPool(), 9852 false, false, 16); 9853 9854 // r = pblendv(r, psllw(r & (char16)63, 2), a); 9855 M = DAG.getNode(ISD::AND, dl, VT, R, M); 9856 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9857 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 9858 DAG.getConstant(2, MVT::i32)); 9859 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op); 9860 // a += a 9861 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 9862 9863 // return pblendv(r, r+r, a); 9864 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, 9865 R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op); 9866 return R; 9867 } 9868 return SDValue(); 9869} 9870 9871SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 9872 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 9873 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 9874 // looks for this combo and may remove the "setcc" instruction if the "setcc" 9875 // has only one use. 9876 SDNode *N = Op.getNode(); 9877 SDValue LHS = N->getOperand(0); 9878 SDValue RHS = N->getOperand(1); 9879 unsigned BaseOp = 0; 9880 unsigned Cond = 0; 9881 DebugLoc DL = Op.getDebugLoc(); 9882 switch (Op.getOpcode()) { 9883 default: llvm_unreachable("Unknown ovf instruction!"); 9884 case ISD::SADDO: 9885 // A subtract of one will be selected as a INC. 
Note that INC doesn't 9886 // set CF, so we can't do this for UADDO. 9887 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 9888 if (C->isOne()) { 9889 BaseOp = X86ISD::INC; 9890 Cond = X86::COND_O; 9891 break; 9892 } 9893 BaseOp = X86ISD::ADD; 9894 Cond = X86::COND_O; 9895 break; 9896 case ISD::UADDO: 9897 BaseOp = X86ISD::ADD; 9898 Cond = X86::COND_B; 9899 break; 9900 case ISD::SSUBO: 9901 // A subtract of one will be selected as a DEC. Note that DEC doesn't 9902 // set CF, so we can't do this for USUBO. 9903 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 9904 if (C->isOne()) { 9905 BaseOp = X86ISD::DEC; 9906 Cond = X86::COND_O; 9907 break; 9908 } 9909 BaseOp = X86ISD::SUB; 9910 Cond = X86::COND_O; 9911 break; 9912 case ISD::USUBO: 9913 BaseOp = X86ISD::SUB; 9914 Cond = X86::COND_B; 9915 break; 9916 case ISD::SMULO: 9917 BaseOp = X86ISD::SMUL; 9918 Cond = X86::COND_O; 9919 break; 9920 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 9921 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 9922 MVT::i32); 9923 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 9924 9925 SDValue SetCC = 9926 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 9927 DAG.getConstant(X86::COND_O, MVT::i32), 9928 SDValue(Sum.getNode(), 2)); 9929 9930 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 9931 } 9932 } 9933 9934 // Also sets EFLAGS. 9935 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 9936 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 9937 9938 SDValue SetCC = 9939 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 9940 DAG.getConstant(Cond, MVT::i32), 9941 SDValue(Sum.getNode(), 1)); 9942 9943 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 9944} 9945 9946SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const{ 9947 DebugLoc dl = Op.getDebugLoc(); 9948 SDNode* Node = Op.getNode(); 9949 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 9950 EVT VT = Node->getValueType(0); 9951 9952 if (Subtarget->hasSSE2() && VT.isVector()) { 9953 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 9954 ExtraVT.getScalarType().getSizeInBits(); 9955 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 9956 9957 unsigned SHLIntrinsicsID = 0; 9958 unsigned SRAIntrinsicsID = 0; 9959 switch (VT.getSimpleVT().SimpleTy) { 9960 default: 9961 return SDValue(); 9962 case MVT::v2i64: { 9963 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_q; 9964 SRAIntrinsicsID = 0; 9965 break; 9966 } 9967 case MVT::v4i32: { 9968 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_d; 9969 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_d; 9970 break; 9971 } 9972 case MVT::v8i16: { 9973 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_w; 9974 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_w; 9975 break; 9976 } 9977 } 9978 9979 SDValue Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9980 DAG.getConstant(SHLIntrinsicsID, MVT::i32), 9981 Node->getOperand(0), ShAmt); 9982 9983 // In case of 1 bit sext, no need to shr 9984 if (ExtraVT.getScalarType().getSizeInBits() == 1) return Tmp1; 9985 9986 if (SRAIntrinsicsID) { 9987 Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9988 DAG.getConstant(SRAIntrinsicsID, MVT::i32), 9989 Tmp1, ShAmt); 9990 } 9991 return Tmp1; 9992 } 9993 9994 return SDValue(); 9995} 9996 9997 9998SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ 9999 DebugLoc dl = Op.getDebugLoc(); 10000 10001 // Go ahead and emit the fence on x86-64 even if we asked for 
no-sse2. 10002 // There isn't any reason to disable it if the target processor supports it. 10003 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 10004 SDValue Chain = Op.getOperand(0); 10005 SDValue Zero = DAG.getConstant(0, MVT::i32); 10006 SDValue Ops[] = { 10007 DAG.getRegister(X86::ESP, MVT::i32), // Base 10008 DAG.getTargetConstant(1, MVT::i8), // Scale 10009 DAG.getRegister(0, MVT::i32), // Index 10010 DAG.getTargetConstant(0, MVT::i32), // Disp 10011 DAG.getRegister(0, MVT::i32), // Segment. 10012 Zero, 10013 Chain 10014 }; 10015 SDNode *Res = 10016 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10017 array_lengthof(Ops)); 10018 return SDValue(Res, 0); 10019 } 10020 10021 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 10022 if (!isDev) 10023 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10024 10025 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10026 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 10027 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 10028 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 10029 10030 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 10031 if (!Op1 && !Op2 && !Op3 && Op4) 10032 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 10033 10034 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 10035 if (Op1 && !Op2 && !Op3 && !Op4) 10036 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 10037 10038 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 10039 // (MFENCE)>; 10040 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10041} 10042 10043SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op, 10044 SelectionDAG &DAG) const { 10045 DebugLoc dl = Op.getDebugLoc(); 10046 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 10047 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 10048 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 10049 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 10050 10051 // The only fence that needs an instruction is a sequentially-consistent 10052 // cross-thread fence. 10053 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 10054 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 10055 // no-sse2). There isn't any reason to disable it if the target processor 10056 // supports it. 10057 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 10058 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10059 10060 SDValue Chain = Op.getOperand(0); 10061 SDValue Zero = DAG.getConstant(0, MVT::i32); 10062 SDValue Ops[] = { 10063 DAG.getRegister(X86::ESP, MVT::i32), // Base 10064 DAG.getTargetConstant(1, MVT::i8), // Scale 10065 DAG.getRegister(0, MVT::i32), // Index 10066 DAG.getTargetConstant(0, MVT::i32), // Disp 10067 DAG.getRegister(0, MVT::i32), // Segment. 10068 Zero, 10069 Chain 10070 }; 10071 SDNode *Res = 10072 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10073 array_lengthof(Ops)); 10074 return SDValue(Res, 0); 10075 } 10076 10077 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
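// (For reference, the SSE2-less path above instead emits the idiom
//   lock orl $0, (%esp)
// a locked read-modify-write of the top-of-stack word, which acts as a full
// barrier on x86 without requiring MFENCE.)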
10078 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10079} 10080 10081 10082SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 10083 EVT T = Op.getValueType(); 10084 DebugLoc DL = Op.getDebugLoc(); 10085 unsigned Reg = 0; 10086 unsigned size = 0; 10087 switch(T.getSimpleVT().SimpleTy) { 10088 default: 10089 assert(false && "Invalid value type!"); 10090 case MVT::i8: Reg = X86::AL; size = 1; break; 10091 case MVT::i16: Reg = X86::AX; size = 2; break; 10092 case MVT::i32: Reg = X86::EAX; size = 4; break; 10093 case MVT::i64: 10094 assert(Subtarget->is64Bit() && "Node not type legal!"); 10095 Reg = X86::RAX; size = 8; 10096 break; 10097 } 10098 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 10099 Op.getOperand(2), SDValue()); 10100 SDValue Ops[] = { cpIn.getValue(0), 10101 Op.getOperand(1), 10102 Op.getOperand(3), 10103 DAG.getTargetConstant(size, MVT::i8), 10104 cpIn.getValue(1) }; 10105 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10106 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 10107 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 10108 Ops, 5, T, MMO); 10109 SDValue cpOut = 10110 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 10111 return cpOut; 10112} 10113 10114SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 10115 SelectionDAG &DAG) const { 10116 assert(Subtarget->is64Bit() && "Result not type legalized?"); 10117 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10118 SDValue TheChain = Op.getOperand(0); 10119 DebugLoc dl = Op.getDebugLoc(); 10120 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 10121 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 10122 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 10123 rax.getValue(2)); 10124 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 10125 DAG.getConstant(32, MVT::i8)); 10126 SDValue Ops[] = { 10127 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 10128 rdx.getValue(1) 10129 }; 10130 return DAG.getMergeValues(Ops, 2, dl); 10131} 10132 10133SDValue X86TargetLowering::LowerBITCAST(SDValue Op, 10134 SelectionDAG &DAG) const { 10135 EVT SrcVT = Op.getOperand(0).getValueType(); 10136 EVT DstVT = Op.getValueType(); 10137 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 10138 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 10139 assert((DstVT == MVT::i64 || 10140 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 10141 "Unexpected custom BITCAST"); 10142 // i64 <=> MMX conversions are Legal. 10143 if (SrcVT==MVT::i64 && DstVT.isVector()) 10144 return Op; 10145 if (DstVT==MVT::i64 && SrcVT.isVector()) 10146 return Op; 10147 // MMX <=> MMX conversions are Legal. 10148 if (SrcVT.isVector() && DstVT.isVector()) 10149 return Op; 10150 // All other conversions need to be expanded. 
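// Returning an empty SDValue here asks the legalizer to expand the bitcast,
// which it typically does by going through a stack temporary.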
10151 return SDValue(); 10152} 10153 10154SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 10155 SDNode *Node = Op.getNode(); 10156 DebugLoc dl = Node->getDebugLoc(); 10157 EVT T = Node->getValueType(0); 10158 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 10159 DAG.getConstant(0, T), Node->getOperand(2)); 10160 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 10161 cast<AtomicSDNode>(Node)->getMemoryVT(), 10162 Node->getOperand(0), 10163 Node->getOperand(1), negOp, 10164 cast<AtomicSDNode>(Node)->getSrcValue(), 10165 cast<AtomicSDNode>(Node)->getAlignment(), 10166 cast<AtomicSDNode>(Node)->getOrdering(), 10167 cast<AtomicSDNode>(Node)->getSynchScope()); 10168} 10169 10170static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 10171 SDNode *Node = Op.getNode(); 10172 DebugLoc dl = Node->getDebugLoc(); 10173 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10174 10175 // Convert seq_cst store -> xchg 10176 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 10177 // FIXME: On 32-bit, store -> fist or movq would be more efficient 10178 // (The only way to get a 16-byte store is cmpxchg16b) 10179 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 10180 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 10181 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 10182 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 10183 cast<AtomicSDNode>(Node)->getMemoryVT(), 10184 Node->getOperand(0), 10185 Node->getOperand(1), Node->getOperand(2), 10186 cast<AtomicSDNode>(Node)->getMemOperand(), 10187 cast<AtomicSDNode>(Node)->getOrdering(), 10188 cast<AtomicSDNode>(Node)->getSynchScope()); 10189 return Swap.getValue(1); 10190 } 10191 // Other atomic stores have a simple pattern. 10192 return Op; 10193} 10194 10195static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 10196 EVT VT = Op.getNode()->getValueType(0); 10197 10198 // Let legalize expand this if it isn't a legal type yet. 10199 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10200 return SDValue(); 10201 10202 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10203 10204 unsigned Opc; 10205 bool ExtraOp = false; 10206 switch (Op.getOpcode()) { 10207 default: assert(0 && "Invalid code"); 10208 case ISD::ADDC: Opc = X86ISD::ADD; break; 10209 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 10210 case ISD::SUBC: Opc = X86ISD::SUB; break; 10211 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 10212 } 10213 10214 if (!ExtraOp) 10215 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10216 Op.getOperand(1)); 10217 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10218 Op.getOperand(1), Op.getOperand(2)); 10219} 10220 10221/// LowerOperation - Provide custom lowering hooks for some operations. 
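/// Each case dispatches to the matching LowerXXX helper defined above; a
/// helper that returns an empty SDValue falls back to the legalizer's
/// default expansion of the node.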
10222/// 10223SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10224 switch (Op.getOpcode()) { 10225 default: llvm_unreachable("Should not custom lower this!"); 10226 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 10227 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 10228 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG); 10229 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 10230 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 10231 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 10232 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10233 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 10234 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10235 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10236 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10237 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 10238 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 10239 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10240 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10241 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10242 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10243 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 10244 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10245 case ISD::SHL_PARTS: 10246 case ISD::SRA_PARTS: 10247 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 10248 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 10249 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 10250 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 10251 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 10252 case ISD::FABS: return LowerFABS(Op, DAG); 10253 case ISD::FNEG: return LowerFNEG(Op, DAG); 10254 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 10255 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 10256 case ISD::SETCC: return LowerSETCC(Op, DAG); 10257 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 10258 case ISD::SELECT: return LowerSELECT(Op, DAG); 10259 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 10260 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 10261 case ISD::VASTART: return LowerVASTART(Op, DAG); 10262 case ISD::VAARG: return LowerVAARG(Op, DAG); 10263 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 10264 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10265 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10266 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10267 case ISD::FRAME_TO_ARGS_OFFSET: 10268 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 10269 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 10270 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 10271 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 10272 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10273 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 10274 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 10275 case ISD::MUL: return LowerMUL(Op, DAG); 10276 case ISD::SRA: 10277 case ISD::SRL: 10278 case ISD::SHL: return LowerShift(Op, DAG); 10279 case ISD::SADDO: 10280 case ISD::UADDO: 10281 case ISD::SSUBO: 10282 case ISD::USUBO: 10283 case ISD::SMULO: 10284 case ISD::UMULO: return LowerXALUO(Op, DAG); 10285 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 10286 case ISD::BITCAST: return 
LowerBITCAST(Op, DAG); 10287 case ISD::ADDC: 10288 case ISD::ADDE: 10289 case ISD::SUBC: 10290 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 10291 case ISD::ADD: return LowerADD(Op, DAG); 10292 case ISD::SUB: return LowerSUB(Op, DAG); 10293 } 10294} 10295 10296static void ReplaceATOMIC_LOAD(SDNode *Node, 10297 SmallVectorImpl<SDValue> &Results, 10298 SelectionDAG &DAG) { 10299 DebugLoc dl = Node->getDebugLoc(); 10300 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10301 10302 // Convert wide load -> cmpxchg8b/cmpxchg16b 10303 // FIXME: On 32-bit, load -> fild or movq would be more efficient 10304 // (The only way to get a 16-byte load is cmpxchg16b) 10305 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 10306 SDValue Zero = DAG.getConstant(0, cast<AtomicSDNode>(Node)->getMemoryVT()); 10307 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 10308 cast<AtomicSDNode>(Node)->getMemoryVT(), 10309 Node->getOperand(0), 10310 Node->getOperand(1), Zero, Zero, 10311 cast<AtomicSDNode>(Node)->getMemOperand(), 10312 cast<AtomicSDNode>(Node)->getOrdering(), 10313 cast<AtomicSDNode>(Node)->getSynchScope()); 10314 Results.push_back(Swap.getValue(0)); 10315 Results.push_back(Swap.getValue(1)); 10316} 10317 10318void X86TargetLowering:: 10319ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 10320 SelectionDAG &DAG, unsigned NewOp) const { 10321 EVT T = Node->getValueType(0); 10322 DebugLoc dl = Node->getDebugLoc(); 10323 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 10324 10325 SDValue Chain = Node->getOperand(0); 10326 SDValue In1 = Node->getOperand(1); 10327 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10328 Node->getOperand(2), DAG.getIntPtrConstant(0)); 10329 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10330 Node->getOperand(2), DAG.getIntPtrConstant(1)); 10331 SDValue Ops[] = { Chain, In1, In2L, In2H }; 10332 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10333 SDValue Result = 10334 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 10335 cast<MemSDNode>(Node)->getMemOperand()); 10336 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 10337 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 10338 Results.push_back(Result.getValue(2)); 10339} 10340 10341/// ReplaceNodeResults - Replace a node with an illegal result type 10342/// with a new node built out of custom code. 10343void X86TargetLowering::ReplaceNodeResults(SDNode *N, 10344 SmallVectorImpl<SDValue>&Results, 10345 SelectionDAG &DAG) const { 10346 DebugLoc dl = N->getDebugLoc(); 10347 switch (N->getOpcode()) { 10348 default: 10349 assert(false && "Do not know how to custom type legalize this operation!"); 10350 return; 10351 case ISD::SIGN_EXTEND_INREG: 10352 case ISD::ADDC: 10353 case ISD::ADDE: 10354 case ISD::SUBC: 10355 case ISD::SUBE: 10356 // We don't want to expand or promote these. 10357 return; 10358 case ISD::FP_TO_SINT: { 10359 std::pair<SDValue,SDValue> Vals = 10360 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 10361 SDValue FIST = Vals.first, StackSlot = Vals.second; 10362 if (FIST.getNode() != 0) { 10363 EVT VT = N->getValueType(0); 10364 // Return a load from the stack slot. 
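// (FP_TO_INTHelper arranged for the value to be stored to StackSlot via an
// x87 fistp-style FP_TO_INT*_IN_MEM node; reload the integer result here.)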

/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default:
    assert(false && "Do not know how to custom type legalize this operation!");
    return;
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    // We don't want to expand or promote these.
    return;
  case ISD::FP_TO_SINT: {
    std::pair<SDValue,SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode() != 0) {
      EVT VT = N->getValueType(0);
      // Return a load from the stack slot.
      Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
                                    MachinePointerInfo(), false, false, 0));
    }
    return;
  }
  case ISD::READCYCLECOUNTER: {
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue TheChain = N->getOperand(0);
    SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
                                     rd.getValue(1));
    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
                                     eax.getValue(2));
    // Use a buildpair to merge the two 32-bit values into a 64-bit one.
    SDValue Ops[] = { eax, edx };
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2));
    Results.push_back(edx.getValue(1));
    return;
  }
  case ISD::ATOMIC_CMP_SWAP: {
    EVT T = N->getValueType(0);
    assert(T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
                        DAG.getConstant(0, MVT::i32));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
                        DAG.getConstant(1, MVT::i32));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH,
                             cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
                          DAG.getConstant(0, MVT::i32));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
                          DAG.getConstant(1, MVT::i32));
    swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL,
                               cpInH.getValue(1));
    swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH,
                               swapInL.getValue(1));
    SDValue Ops[] = { swapInH.getValue(0),
                      N->getOperand(1),
                      swapInH.getValue(1) };
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys,
                                             Ops, 3, T, MMO);
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX,
                                        MVT::i32, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX,
                                        MVT::i32, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0) };
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
    Results.push_back(cpOutH.getValue(1));
    return;
  }
  case ISD::ATOMIC_LOAD_ADD:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG);
    return;
  case ISD::ATOMIC_LOAD_AND:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG);
    return;
  case ISD::ATOMIC_LOAD_NAND:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG);
    return;
  case ISD::ATOMIC_LOAD_OR:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG);
    return;
  case ISD::ATOMIC_LOAD_SUB:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG);
    return;
  case ISD::ATOMIC_LOAD_XOR:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG);
    return;
  case ISD::ATOMIC_SWAP:
    ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG);
    return;
  case ISD::ATOMIC_LOAD:
    ReplaceATOMIC_LOAD(N, Results, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::BSF:                return "X86ISD::BSF";
  case X86ISD::BSR:                return "X86ISD::BSR";
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FSRL:               return "X86ISD::FSRL";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::BT:                 return "X86ISD::BT";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY:        return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCCsd:           return "X86ISD::FSETCCsd";
  case X86ISD::FSETCCss:           return "X86ISD::FSETCCss";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP:         return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::PSHUFB:             return "X86ISD::PSHUFB";
  case X86ISD::ANDNP:              return "X86ISD::ANDNP";
  case X86ISD::PSIGNB:             return "X86ISD::PSIGNB";
  case X86ISD::PSIGNW:             return "X86ISD::PSIGNW";
  case X86ISD::PSIGND:             return "X86ISD::PSIGND";
  case X86ISD::PBLENDVB:           return "X86ISD::PBLENDVB";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::TLSCALL:            return "X86ISD::TLSCALL";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
  case X86ISD::LCMPXCHG_DAG:       return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG:      return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::ATOMADD64_DAG:      return "X86ISD::ATOMADD64_DAG";
  case X86ISD::ATOMSUB64_DAG:      return "X86ISD::ATOMSUB64_DAG";
  case X86ISD::ATOMOR64_DAG:       return "X86ISD::ATOMOR64_DAG";
  case X86ISD::ATOMXOR64_DAG:      return "X86ISD::ATOMXOR64_DAG";
  case X86ISD::ATOMAND64_DAG:      return "X86ISD::ATOMAND64_DAG";
  case X86ISD::ATOMNAND64_DAG:     return "X86ISD::ATOMNAND64_DAG";
  case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
  case X86ISD::VSHL:               return "X86ISD::VSHL";
  case X86ISD::VSRL:               return "X86ISD::VSRL";
  case X86ISD::CMPPD:              return "X86ISD::CMPPD";
  case X86ISD::CMPPS:              return "X86ISD::CMPPS";
  case X86ISD::PCMPEQB:            return "X86ISD::PCMPEQB";
  case X86ISD::PCMPEQW:            return "X86ISD::PCMPEQW";
  case X86ISD::PCMPEQD:            return "X86ISD::PCMPEQD";
  case X86ISD::PCMPEQQ:            return "X86ISD::PCMPEQQ";
  case X86ISD::PCMPGTB:            return "X86ISD::PCMPGTB";
  case X86ISD::PCMPGTW:            return "X86ISD::PCMPGTW";
  case X86ISD::PCMPGTD:            return "X86ISD::PCMPGTD";
  case X86ISD::PCMPGTQ:            return "X86ISD::PCMPGTQ";
  case X86ISD::ADD:                return "X86ISD::ADD";
  case X86ISD::SUB:                return "X86ISD::SUB";
  case X86ISD::ADC:                return "X86ISD::ADC";
  case X86ISD::SBB:                return "X86ISD::SBB";
  case X86ISD::SMUL:               return "X86ISD::SMUL";
  case X86ISD::UMUL:               return "X86ISD::UMUL";
  case X86ISD::INC:                return "X86ISD::INC";
  case X86ISD::DEC:                return "X86ISD::DEC";
  case X86ISD::OR:                 return "X86ISD::OR";
  case X86ISD::XOR:                return "X86ISD::XOR";
  case X86ISD::AND:                return "X86ISD::AND";
  case X86ISD::MUL_IMM:            return "X86ISD::MUL_IMM";
  case X86ISD::PTEST:              return "X86ISD::PTEST";
  case X86ISD::TESTP:              return "X86ISD::TESTP";
  case X86ISD::PALIGN:             return "X86ISD::PALIGN";
  case X86ISD::PSHUFD:             return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW:            return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFHW_LD:         return "X86ISD::PSHUFHW_LD";
  case X86ISD::PSHUFLW:            return "X86ISD::PSHUFLW";
  case X86ISD::PSHUFLW_LD:         return "X86ISD::PSHUFLW_LD";
  case X86ISD::SHUFPS:             return "X86ISD::SHUFPS";
  case X86ISD::SHUFPD:             return "X86ISD::SHUFPD";
  case X86ISD::MOVLHPS:            return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD:            return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS:            return "X86ISD::MOVHLPS";
  case X86ISD::MOVHLPD:            return "X86ISD::MOVHLPD";
  case X86ISD::MOVLPS:             return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD:             return "X86ISD::MOVLPD";
  case X86ISD::MOVDDUP:            return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP:           return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP:           return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSHDUP_LD:        return "X86ISD::MOVSHDUP_LD";
  case X86ISD::MOVSLDUP_LD:        return "X86ISD::MOVSLDUP_LD";
  case X86ISD::MOVSD:              return "X86ISD::MOVSD";
  case X86ISD::MOVSS:              return "X86ISD::MOVSS";
  case X86ISD::UNPCKLPS:           return "X86ISD::UNPCKLPS";
  case X86ISD::UNPCKLPD:           return "X86ISD::UNPCKLPD";
  case X86ISD::VUNPCKLPDY:         return "X86ISD::VUNPCKLPDY";
  case X86ISD::UNPCKHPS:           return "X86ISD::UNPCKHPS";
  case X86ISD::UNPCKHPD:           return "X86ISD::UNPCKHPD";
  case X86ISD::PUNPCKLBW:          return "X86ISD::PUNPCKLBW";
  case X86ISD::PUNPCKLWD:          return "X86ISD::PUNPCKLWD";
  case X86ISD::PUNPCKLDQ:          return "X86ISD::PUNPCKLDQ";
  case X86ISD::PUNPCKLQDQ:         return "X86ISD::PUNPCKLQDQ";
  case X86ISD::PUNPCKHBW:          return "X86ISD::PUNPCKHBW";
  case X86ISD::PUNPCKHWD:          return "X86ISD::PUNPCKHWD";
  case X86ISD::PUNPCKHDQ:          return "X86ISD::PUNPCKHDQ";
  case X86ISD::PUNPCKHQDQ:         return "X86ISD::PUNPCKHQDQ";
  case X86ISD::VBROADCAST:         return "X86ISD::VBROADCAST";
  case X86ISD::VPERMILPS:          return "X86ISD::VPERMILPS";
  case X86ISD::VPERMILPSY:         return "X86ISD::VPERMILPSY";
  case X86ISD::VPERMILPD:          return "X86ISD::VPERMILPD";
  case X86ISD::VPERMILPDY:         return "X86ISD::VPERMILPDY";
  case X86ISD::VPERM2F128:         return "X86ISD::VPERM2F128";
  case X86ISD::VASTART_SAVE_XMM_REGS:
                                   return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64:           return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA:         return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER:         return "X86ISD::MEMBARRIER";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
      Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}
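
// A few hand-picked examples of the rules above (for exposition only, not
// part of the original comments):
//   base + index*4 + disp   -> legal: scale 4 has a direct SIB encoding
//   index*9, no base reg    -> legal: selected as base(index) + index*8
//   base + index*3          -> rejected: scale 3 needs the base slot itself
//   index*6                 -> rejected: no x86 encoding for scale 6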

bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return true;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return isPALIGNRMask(M, VT, Subtarget->hasSSSE3());

  // FIXME: pshufb, blends, shifts.
  return (VT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, VT) ||
          isSHUFPMask(M, VT) ||
          isPSHUFDMask(M, VT) ||
          isPSHUFHWMask(M, VT) ||
          isPSHUFLWMask(M, VT) ||
          isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ||
          isUNPCKLMask(M, VT) ||
          isUNPCKHMask(M, VT) ||
          isUNPCKL_v_undef_Mask(M, VT) ||
          isUNPCKH_v_undef_Mask(M, VT));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  unsigned NumElts = VT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && VT.getSizeInBits() == 128) {
    return (isMOVLMask(Mask, VT)  ||
            isCommutedMOVLMask(Mask, VT, true) ||
            isSHUFPMask(Mask, VT) ||
            isCommutedSHUFPMask(Mask, VT));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpc,
                                                       unsigned immOpc,
                                                       unsigned LoadOpc,
                                                       unsigned CXchgOpc,
                                                       unsigned notOpc,
                                                       unsigned EAXreg,
                                                       TargetRegisterClass *RC,
                                                       bool invSrc) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld  t1 = [bitinstr.addr]
  //     op  t2 = t1, [bitinstr.val]
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz  newMBB
  //     fallthrough --> nextMBB
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  // First build the CFG.
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);
  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on the incoming instruction.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  DebugLoc dl = bInstr->getDebugLoc();
  MachineOperand &destOper = bInstr->getOperand(0);
  MachineOperand *argOpers[2 + X86::AddrNumOperands];
  int numArgs = bInstr->getNumOperands() - 1;
  for (int i = 0; i < numArgs; ++i)
    argOpers[i] = &bInstr->getOperand(i+1);

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  unsigned tt = F->getRegInfo().createVirtualRegister(RC);
  if (invSrc)
    MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1);
  else
    tt = t1;

  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2);
  MIB.addReg(tt);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t2);
  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(EAXreg);

  // Insert the branch that closes the loop.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();  // The pseudo instruction is gone now.
  return nextMBB;
}
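
// Illustrative expansion (assumed virtual register names, invSrc == false),
// e.g. for the ATOMAND32 pseudo:
//
//   newMBB:
//     %t1   = MOV32rm [addr]          ; load the current value
//     %t2   = AND32rr %t1, %val       ; compute the new value
//     EAX   = COPY %t1                ; expected old value for cmpxchg
//     LCMPXCHG32 [addr], %t2          ; implicit EAX use/def, sets EFLAGS
//     %dest = COPY EAX                ; value seen in memory by the exchange
//     JNE_4 newMBB                    ; retry if [addr] changed underneath us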

// private utility function: 64-bit atomics on a 32-bit host.
MachineBasicBlock *
X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpcL,
                                                       unsigned regOpcH,
                                                       unsigned immOpcL,
                                                       unsigned immOpcH,
                                                       bool invSrc) const {
  // For the atomic bitwise operator, we generate
  //   thisMBB (instructions are in pairs, except cmpxchg8b)
  //     ld t1,t2 = [bitinstr.addr]
  //   newMBB:
  //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
  //     op  t5, t6 <- out1, out2, [bitinstr.val]
  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
  //     mov ECX, EBX <- t5, t6
  //     mov EAX, EDX <- t1, t2
  //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
  //     mov t3, t4 <- EAX, EDX
  //     bz  newMBB
  //     result in out1, out2
  //     fallthrough --> nextMBB

  const TargetRegisterClass *RC = X86::GR32RegisterClass;
  const unsigned LoadOpc = X86::MOV32rm;
  const unsigned NotOpc = X86::NOT32r;
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  // First build the CFG.
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(bInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = bInstr->getDebugLoc();
  // Insert instructions into newMBB based on the incoming instruction.
  // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
         "unexpected number of operands");
  MachineOperand &dest1Oper = bInstr->getOperand(0);
  MachineOperand &dest2Oper = bInstr->getOperand(1);
  MachineOperand *argOpers[2 + X86::AddrNumOperands];
  for (int i = 0; i < 2 + X86::AddrNumOperands; ++i) {
    argOpers[i] = &bInstr->getOperand(i+2);

    // We use some of the operands multiple times, so conservatively just
    // clear any kill flags that might be present.
    if (argOpers[i]->isReg() && argOpers[i]->isUse())
      argOpers[i]->setIsKill(false);
  }

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
  // Add 4 to the displacement to form the address of the high half.
  for (int i = 0; i <= lastAddrIndx - 2; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
    newOp3.setImm(newOp3.getImm() + 4);
  else
    newOp3.setOffset(newOp3.getOffset() + 4);
  (*MIB).addOperand(newOp3);
  (*MIB).addOperand(*argOpers[lastAddrIndx]);

  // t3/4 are defined later, at the bottom of the loop.
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg())
    .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
  BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg())
    .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);

  // The subsequent operations should be using the destination registers of
  // the PHI instructions.
  if (invSrc) {
    t1 = F->getRegInfo().createVirtualRegister(RC);
    t2 = F->getRegInfo().createVirtualRegister(RC);
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg());
    MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg());
  } else {
    t1 = dest1Oper.getReg();
    t2 = dest2Oper.getReg();
  }

  int valArgIndx = lastAddrIndx + 1;
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
  if (regOpcL != X86::MOV32rr)
    MIB.addReg(t1);
  (*MIB).addOperand(*argOpers[valArgIndx]);
  assert(argOpers[valArgIndx + 1]->isReg() ==
         argOpers[valArgIndx]->isReg());
  assert(argOpers[valArgIndx + 1]->isImm() ==
         argOpers[valArgIndx]->isImm());
  if (argOpers[valArgIndx + 1]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
  else
    MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
  if (regOpcH != X86::MOV32rr)
    MIB.addReg(t2);
  (*MIB).addOperand(*argOpers[valArgIndx + 1]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
  MIB.addReg(t2);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
  MIB.addReg(t5);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
  MIB.addReg(t6);

  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(bInstr->memoperands_begin(),
                    bInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
  MIB.addReg(X86::EAX);
  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
  MIB.addReg(X86::EDX);

  // Insert the branch that closes the loop.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  bInstr->eraseFromParent();  // The pseudo instruction is gone now.
  return nextMBB;
}
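
// A note on the pair of loads above (illustration only): the i64 operand is
// read as two 32-bit halves, so the second MOV32rm reuses the same address
// operands with only the displacement rewritten:
//
//   %t1 = MOV32rm [base + index*scale + disp]      ; low half
//   %t2 = MOV32rm [base + index*scale + disp + 4]  ; high half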

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
                                                      MachineBasicBlock *MBB,
                                                      unsigned cmovOpc) const {
  // For the atomic min/max operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld t1 = [min/max.addr]
  //     mov t2 = [min/max.val]
  //     cmp t1, t2
  //     cmov[cond] t2 = t1
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz newMBB
  //     fallthrough --> nextMBB
  //
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  // First build the CFG.
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Transfer the remainder of thisMBB and its successor edges to nextMBB.
  nextMBB->splice(nextMBB->begin(), thisMBB,
                  llvm::next(MachineBasicBlock::iterator(mInstr)),
                  thisMBB->end());
  nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

  // Update thisMBB to fall through to newMBB.
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB.
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  DebugLoc dl = mInstr->getDebugLoc();
  // Insert instructions into newMBB based on the incoming instruction.
  assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
         "unexpected number of operands");
  MachineOperand &destOper = mInstr->getOperand(0);
  MachineOperand *argOpers[2 + X86::AddrNumOperands];
  int numArgs = mInstr->getNumOperands() - 1;
  for (int i = 0; i < numArgs; ++i)
    argOpers[i] = &mInstr->getOperand(i+1);

  // x86 address has 5 operands: base, index, scale, displacement, and segment.
  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
  int valArgIndx = lastAddrIndx + 1;

  unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // We only support register and immediate values.
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");

  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
  else
    MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
  MIB.addReg(t1);
  MIB.addReg(t2);

  // Generate the conditional move.
  unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MIB = BuildMI(newMBB, dl, TII->get(cmovOpc), t3);
  MIB.addReg(t2);
  MIB.addReg(t1);

  // Compare and exchange, unless another thread has modified the memory
  // location in the meantime.
  MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32));
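
// For illustration (not part of the original comments): ATOMMIN32 passes
// X86::CMOVL32rr as cmovOpc, so the loop body computes the signed minimum of
// the loaded value t1 and the operand t2 before attempting the exchange:
//
//   CMP32rr %t1, %t2
//   %t3 = CMOVL32rr %t2, %t1   ; t3 = (t1 < t2) ? t1 : t2
//
// LCMPXCHG32 then tries to publish t3, retrying on contention as above.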
  for (int i = 0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).setMemRefs(mInstr->memoperands_begin(),
                    mInstr->memoperands_end());

  MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
  MIB.addReg(X86::EAX);

  // Insert the branch that closes the loop.
  BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);

  mInstr->eraseFromParent();  // The pseudo instruction is gone now.
  return nextMBB;
}

// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
                            unsigned numArgs, bool memArg) const {
  assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
         "Target must have SSE4.2 or AVX features enabled");

  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  unsigned Opc;
  if (!Subtarget->hasAVX()) {
    if (memArg)
      Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
  } else {
    if (memArg)
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
    else
      Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
  }

  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
  for (unsigned i = 0; i < numArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i+1);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  BuildMI(*BB, MI, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));

  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
    .addReg(MI->getOperand(ValOps+1).getReg());

  // The instruction itself takes no explicit operands; it reads the
  // registers set up above implicitly.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));

  MI->eraseFromParent();  // The pseudo is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  // First arg in ECX, the second in EAX.
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(0).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
    .addReg(MI->getOperand(1).getReg());

  // The instruction itself takes no explicit operands; it reads the
  // registers set up above implicitly.
  BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));

  MI->eraseFromParent();  // The pseudo is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(
                   MachineInstr *MI,
                   MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output  : destination address (reg)
  // 1-5) Input   : va_list address (addr, i64mem)
  // 6  ) ArgSize : Size (in bytes) of vararg type
  // 7  ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align   : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI->getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  // Align ArgSize to a multiple of 8.
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0;  // unused
    OverflowDestReg = DestReg;

    offsetMBB = NULL;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //       /     \
    //  offsetMBB  overflowMBB
    //       \     /
    //       endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   llvm::next(MachineBasicBlock::iterator(MI)),
                   thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Zero-extend the offset
    unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .addReg(NextOffsetReg)
      .setMemRefs(MMOBegin, MMOEnd);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
      .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
    unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .addReg(NextAddrReg)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}
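
// Worked example (for exposition only): for an i64 argument taking the
// gp_offset path, MaxOffset = 6*8 = 48 and ArgSizeA8 = 8, so the CMP32ri
// emitted in thisMBB tests gp_offset against 48 + 8 - 8 = 48 and takes the
// overflow path exactly when all six integer argument registers have been
// consumed.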

MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                   MachineInstr *MI,
                   MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands(); i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
          MachineMemOperand::MOStore,
          /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent();  // The pseudo instruction is gone now.

  return EndMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const MachineFunction *MF = BB->getParent();
  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
  BitVector ReservedRegs = TRI->getReservedRegs(*MF);

  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue;
    unsigned Reg = MO.getReg();
    if (Reg != X86::EFLAGS) continue;
    copy0MBB->addLiveIn(Reg);
    sinkMBB->addLiveIn(Reg);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();  // The pseudo instruction is gone now.
  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetEnvMacho());

  // The lowering is pretty easy: we're just emitting the call to _alloca.
  // The non-trivial part is the implicit def/use of ESP.

  if (Subtarget->isTargetWin64()) {
    if (Subtarget->isTargetCygMing()) {
      // ___chkstk(Mingw64):
      // Clobbers R10, R11, RAX and EFLAGS.
      // Updates RSP.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("___chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::RSP, RegState::Implicit)
        .addReg(X86::RAX, RegState::Define | RegState::Implicit)
        .addReg(X86::RSP, RegState::Define | RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
    } else {
      // __chkstk(MSVCRT): does not update stack pointer.
      // Clobbers R10, R11 and EFLAGS.
      // FIXME: RAX(allocated size) might be reused and not killed.
      BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
        .addExternalSymbol("__chkstk")
        .addReg(X86::RAX, RegState::Implicit)
        .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
      // RAX has the offset to be subtracted from RSP.
      BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
        .addReg(X86::RSP)
        .addReg(X86::RAX);
    }
  } else {
    const char *StackProbeSymbol =
      Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";

    BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(X86::EAX, RegState::Implicit)
      .addReg(X86::ESP, RegState::Implicit)
      .addReg(X86::EAX, RegState::Define | RegState::Implicit)
      .addReg(X86::ESP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
  }

  MI->eraseFromParent();  // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy.  We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call.  The return value will then
  // be in the normal return register.
  const X86InstrInfo *TII
    = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *F = BB->getParent();

  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");

  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
      .addReg(X86::RIP)
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
  } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
      .addReg(0)
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
      .addReg(TII->getGlobalBaseReg(F))
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
  }

  MI->eraseFromParent();  // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::TAILJMPd64:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
    assert(!"TAILJMP64 would not be touched here.");
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    // The defs of TCRETURNxx64 include Win64's callee-saved registers as a
    // subset. On AMD64, the additional defs should be added before register
    // allocation.
    if (!Subtarget->isTargetWin64()) {
      MI->addRegisterDefined(X86::RSI);
      MI->addRegisterDefined(X86::RDI);
      MI->addRegisterDefined(X86::XMM6);
      MI->addRegisterDefined(X86::XMM7);
      MI->addRegisterDefined(X86::XMM8);
      MI->addRegisterDefined(X86::XMM9);
      MI->addRegisterDefined(X86::XMM10);
      MI->addRegisterDefined(X86::XMM11);
      MI->addRegisterDefined(X86::XMM12);
      MI->addRegisterDefined(X86::XMM13);
      MI->addRegisterDefined(X86::XMM14);
      MI->addRegisterDefined(X86::XMM15);
    }
    return BB;
  case X86::WIN_ALLOCA:
    return EmitLoweredWinAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_GR8:
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64:
  case X86::CMOV_V8F32:
  case X86::CMOV_V4F64:
  case X86::CMOV_V4I64:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
    return EmitLoweredSelect(MI, BB);

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
    DebugLoc DL = MI->getDebugLoc();

    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI->eraseFromParent();  // The pseudo instruction is gone now.
    return BB;
  }
  // String/text processing lowering.
  case X86::PCMPISTRM128REG:
  case X86::VPCMPISTRM128REG:
    return EmitPCMP(MI, BB, 3, false /* memArg */);
  case X86::PCMPISTRM128MEM:
  case X86::VPCMPISTRM128MEM:
    return EmitPCMP(MI, BB, 3, true /* memArg */);
  case X86::PCMPESTRM128REG:
  case X86::VPCMPESTRM128REG:
    return EmitPCMP(MI, BB, 5, false /* memArg */);
  case X86::PCMPESTRM128MEM:
  case X86::VPCMPESTRM128MEM:
    return EmitPCMP(MI, BB, 5, true /* memArg */);

  // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB);
  case X86::MWAIT:
    return EmitMwait(MI, BB);

  // Atomic Lowering.
11837 case X86::ATOMAND32: 11838 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 11839 X86::AND32ri, X86::MOV32rm, 11840 X86::LCMPXCHG32, 11841 X86::NOT32r, X86::EAX, 11842 X86::GR32RegisterClass); 11843 case X86::ATOMOR32: 11844 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 11845 X86::OR32ri, X86::MOV32rm, 11846 X86::LCMPXCHG32, 11847 X86::NOT32r, X86::EAX, 11848 X86::GR32RegisterClass); 11849 case X86::ATOMXOR32: 11850 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 11851 X86::XOR32ri, X86::MOV32rm, 11852 X86::LCMPXCHG32, 11853 X86::NOT32r, X86::EAX, 11854 X86::GR32RegisterClass); 11855 case X86::ATOMNAND32: 11856 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 11857 X86::AND32ri, X86::MOV32rm, 11858 X86::LCMPXCHG32, 11859 X86::NOT32r, X86::EAX, 11860 X86::GR32RegisterClass, true); 11861 case X86::ATOMMIN32: 11862 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 11863 case X86::ATOMMAX32: 11864 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 11865 case X86::ATOMUMIN32: 11866 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 11867 case X86::ATOMUMAX32: 11868 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 11869 11870 case X86::ATOMAND16: 11871 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 11872 X86::AND16ri, X86::MOV16rm, 11873 X86::LCMPXCHG16, 11874 X86::NOT16r, X86::AX, 11875 X86::GR16RegisterClass); 11876 case X86::ATOMOR16: 11877 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 11878 X86::OR16ri, X86::MOV16rm, 11879 X86::LCMPXCHG16, 11880 X86::NOT16r, X86::AX, 11881 X86::GR16RegisterClass); 11882 case X86::ATOMXOR16: 11883 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, 11884 X86::XOR16ri, X86::MOV16rm, 11885 X86::LCMPXCHG16, 11886 X86::NOT16r, X86::AX, 11887 X86::GR16RegisterClass); 11888 case X86::ATOMNAND16: 11889 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 11890 X86::AND16ri, X86::MOV16rm, 11891 X86::LCMPXCHG16, 11892 X86::NOT16r, X86::AX, 11893 X86::GR16RegisterClass, true); 11894 case X86::ATOMMIN16: 11895 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); 11896 case X86::ATOMMAX16: 11897 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); 11898 case X86::ATOMUMIN16: 11899 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); 11900 case X86::ATOMUMAX16: 11901 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); 11902 11903 case X86::ATOMAND8: 11904 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 11905 X86::AND8ri, X86::MOV8rm, 11906 X86::LCMPXCHG8, 11907 X86::NOT8r, X86::AL, 11908 X86::GR8RegisterClass); 11909 case X86::ATOMOR8: 11910 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 11911 X86::OR8ri, X86::MOV8rm, 11912 X86::LCMPXCHG8, 11913 X86::NOT8r, X86::AL, 11914 X86::GR8RegisterClass); 11915 case X86::ATOMXOR8: 11916 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, 11917 X86::XOR8ri, X86::MOV8rm, 11918 X86::LCMPXCHG8, 11919 X86::NOT8r, X86::AL, 11920 X86::GR8RegisterClass); 11921 case X86::ATOMNAND8: 11922 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 11923 X86::AND8ri, X86::MOV8rm, 11924 X86::LCMPXCHG8, 11925 X86::NOT8r, X86::AL, 11926 X86::GR8RegisterClass, true); 11927 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. 11928 // This group is for 64-bit host. 
11929 case X86::ATOMAND64: 11930 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 11931 X86::AND64ri32, X86::MOV64rm, 11932 X86::LCMPXCHG64, 11933 X86::NOT64r, X86::RAX, 11934 X86::GR64RegisterClass); 11935 case X86::ATOMOR64: 11936 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 11937 X86::OR64ri32, X86::MOV64rm, 11938 X86::LCMPXCHG64, 11939 X86::NOT64r, X86::RAX, 11940 X86::GR64RegisterClass); 11941 case X86::ATOMXOR64: 11942 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, 11943 X86::XOR64ri32, X86::MOV64rm, 11944 X86::LCMPXCHG64, 11945 X86::NOT64r, X86::RAX, 11946 X86::GR64RegisterClass); 11947 case X86::ATOMNAND64: 11948 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 11949 X86::AND64ri32, X86::MOV64rm, 11950 X86::LCMPXCHG64, 11951 X86::NOT64r, X86::RAX, 11952 X86::GR64RegisterClass, true); 11953 case X86::ATOMMIN64: 11954 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); 11955 case X86::ATOMMAX64: 11956 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); 11957 case X86::ATOMUMIN64: 11958 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); 11959 case X86::ATOMUMAX64: 11960 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); 11961 11962 // This group does 64-bit operations on a 32-bit host. 11963 case X86::ATOMAND6432: 11964 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11965 X86::AND32rr, X86::AND32rr, 11966 X86::AND32ri, X86::AND32ri, 11967 false); 11968 case X86::ATOMOR6432: 11969 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11970 X86::OR32rr, X86::OR32rr, 11971 X86::OR32ri, X86::OR32ri, 11972 false); 11973 case X86::ATOMXOR6432: 11974 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11975 X86::XOR32rr, X86::XOR32rr, 11976 X86::XOR32ri, X86::XOR32ri, 11977 false); 11978 case X86::ATOMNAND6432: 11979 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11980 X86::AND32rr, X86::AND32rr, 11981 X86::AND32ri, X86::AND32ri, 11982 true); 11983 case X86::ATOMADD6432: 11984 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11985 X86::ADD32rr, X86::ADC32rr, 11986 X86::ADD32ri, X86::ADC32ri, 11987 false); 11988 case X86::ATOMSUB6432: 11989 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11990 X86::SUB32rr, X86::SBB32rr, 11991 X86::SUB32ri, X86::SBB32ri, 11992 false); 11993 case X86::ATOMSWAP6432: 11994 return EmitAtomicBit6432WithCustomInserter(MI, BB, 11995 X86::MOV32rr, X86::MOV32rr, 11996 X86::MOV32ri, X86::MOV32ri, 11997 false); 11998 case X86::VASTART_SAVE_XMM_REGS: 11999 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 12000 12001 case X86::VAARG_64: 12002 return EmitVAARG64WithCustomInserter(MI, BB); 12003 } 12004} 12005 12006//===----------------------------------------------------------------------===// 12007// X86 Optimization Hooks 12008//===----------------------------------------------------------------------===// 12009 12010void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 12011 const APInt &Mask, 12012 APInt &KnownZero, 12013 APInt &KnownOne, 12014 const SelectionDAG &DAG, 12015 unsigned Depth) const { 12016 unsigned Opc = Op.getOpcode(); 12017 assert((Opc >= ISD::BUILTIN_OP_END || 12018 Opc == ISD::INTRINSIC_WO_CHAIN || 12019 Opc == ISD::INTRINSIC_W_CHAIN || 12020 Opc == ISD::INTRINSIC_VOID) && 12021 "Should use MaskedValueIsZero if you don't know whether Op" 12022 " is a target node!"); 12023 12024 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 
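// By convention, a set bit in KnownZero (resp. KnownOne) means the
// corresponding result bit is known to be zero (resp. one), so starting
// from all-clear masks claims nothing about any bit.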
12025 switch (Opc) { 12026 default: break; 12027 case X86ISD::ADD: 12028 case X86ISD::SUB: 12029 case X86ISD::ADC: 12030 case X86ISD::SBB: 12031 case X86ISD::SMUL: 12032 case X86ISD::UMUL: 12033 case X86ISD::INC: 12034 case X86ISD::DEC: 12035 case X86ISD::OR: 12036 case X86ISD::XOR: 12037 case X86ISD::AND: 12038 // These nodes' second result is a boolean. 12039 if (Op.getResNo() == 0) 12040 break; 12041 // Fallthrough 12042 case X86ISD::SETCC: 12043 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 12044 Mask.getBitWidth() - 1); 12045 break; 12046 } 12047} 12048 12049unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 12050 unsigned Depth) const { 12051 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 12052 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 12053 return Op.getValueType().getScalarType().getSizeInBits(); 12054 12055 // Fallback case. 12056 return 1; 12057} 12058 12059/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 12060/// node is a GlobalAddress + offset. 12061bool X86TargetLowering::isGAPlusOffset(SDNode *N, 12062 const GlobalValue* &GA, 12063 int64_t &Offset) const { 12064 if (N->getOpcode() == X86ISD::Wrapper) { 12065 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 12066 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 12067 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 12068 return true; 12069 } 12070 } 12071 return TargetLowering::isGAPlusOffset(N, GA, Offset); 12072} 12073 12074/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 12075/// same as extracting the high 128-bit part of 256-bit vector and then 12076/// inserting the result into the low part of a new 256-bit vector 12077static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 12078 EVT VT = SVOp->getValueType(0); 12079 int NumElems = VT.getVectorNumElements(); 12080 12081 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12082 for (int i = 0, j = NumElems/2; i < NumElems/2; ++i, ++j) 12083 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12084 SVOp->getMaskElt(j) >= 0) 12085 return false; 12086 12087 return true; 12088} 12089 12090/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 12091/// same as extracting the low 128-bit part of 256-bit vector and then 12092/// inserting the result into the high part of a new 256-bit vector 12093static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 12094 EVT VT = SVOp->getValueType(0); 12095 int NumElems = VT.getVectorNumElements(); 12096 12097 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12098 for (int i = NumElems/2, j = 0; i < NumElems; ++i, ++j) 12099 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12100 SVOp->getMaskElt(j) >= 0) 12101 return false; 12102 12103 return true; 12104} 12105 12106/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 12107static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 12108 TargetLowering::DAGCombinerInfo &DCI) { 12109 DebugLoc dl = N->getDebugLoc(); 12110 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 12111 SDValue V1 = SVOp->getOperand(0); 12112 SDValue V2 = SVOp->getOperand(1); 12113 EVT VT = SVOp->getValueType(0); 12114 int NumElems = VT.getVectorNumElements(); 12115 12116 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 12117 V2.getOpcode() == ISD::CONCAT_VECTORS) { 12118 // 12119 // 0,0,0,... 
12120 // | 12121 // V UNDEF BUILD_VECTOR UNDEF 12122 // \ / \ / 12123 // CONCAT_VECTOR CONCAT_VECTOR 12124 // \ / 12125 // \ / 12126 // RESULT: V + zero extended 12127 // 12128 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 12129 V2.getOperand(1).getOpcode() != ISD::UNDEF || 12130 V1.getOperand(1).getOpcode() != ISD::UNDEF) 12131 return SDValue(); 12132 12133 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 12134 return SDValue(); 12135 12136 // To match the shuffle mask, the first half of the mask should 12137 // be exactly the first vector, and all the rest a splat with the 12138 // first element of the second one. 12139 for (int i = 0; i < NumElems/2; ++i) 12140 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 12141 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 12142 return SDValue(); 12143 12144 // Emit a zeroed vector and insert the desired subvector on its 12145 // first half. 12146 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, DAG, dl); 12147 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 12148 DAG.getConstant(0, MVT::i32), DAG, dl); 12149 return DCI.CombineTo(N, InsV); 12150 } 12151 12152 //===--------------------------------------------------------------------===// 12153 // Combine some shuffles into subvector extracts and inserts: 12154 // 12155 12156 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12157 if (isShuffleHigh128VectorInsertLow(SVOp)) { 12158 SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32), 12159 DAG, dl); 12160 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12161 V, DAG.getConstant(0, MVT::i32), DAG, dl); 12162 return DCI.CombineTo(N, InsV); 12163 } 12164 12165 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12166 if (isShuffleLow128VectorInsertHigh(SVOp)) { 12167 SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl); 12168 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12169 V, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); 12170 return DCI.CombineTo(N, InsV); 12171 } 12172 12173 return SDValue(); 12174} 12175 12176/// PerformShuffleCombine - Performs several different shuffle combines. 12177static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 12178 TargetLowering::DAGCombinerInfo &DCI, 12179 const X86Subtarget *Subtarget) { 12180 DebugLoc dl = N->getDebugLoc(); 12181 EVT VT = N->getValueType(0); 12182 12183 // Don't create instructions with illegal types after legalize types has run. 12184 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12185 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 12186 return SDValue(); 12187 12188 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 12189 if (Subtarget->hasAVX() && VT.getSizeInBits() == 256 && 12190 N->getOpcode() == ISD::VECTOR_SHUFFLE) 12191 return PerformShuffleCombine256(N, DAG, DCI); 12192 12193 // Only handle 128 wide vector from here on. 12194 if (VT.getSizeInBits() != 128) 12195 return SDValue(); 12196 12197 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 12198 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 12199 // consecutive, non-overlapping, and in the right order. 
12200 SmallVector<SDValue, 16> Elts; 12201 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 12202 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 12203 12204 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 12205} 12206 12207/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 12208/// generation and convert it from being a bunch of shuffles and extracts 12209/// to a simple store and scalar loads to extract the elements. 12210static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 12211 const TargetLowering &TLI) { 12212 SDValue InputVector = N->getOperand(0); 12213 12214 // Only operate on vectors of 4 elements, where the alternative shuffling 12215 // gets to be more expensive. 12216 if (InputVector.getValueType() != MVT::v4i32) 12217 return SDValue(); 12218 12219 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 12220 // single use which is a sign-extend or zero-extend, and all elements are 12221 // used. 12222 SmallVector<SDNode *, 4> Uses; 12223 unsigned ExtractedElements = 0; 12224 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 12225 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 12226 if (UI.getUse().getResNo() != InputVector.getResNo()) 12227 return SDValue(); 12228 12229 SDNode *Extract = *UI; 12230 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 12231 return SDValue(); 12232 12233 if (Extract->getValueType(0) != MVT::i32) 12234 return SDValue(); 12235 if (!Extract->hasOneUse()) 12236 return SDValue(); 12237 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 12238 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 12239 return SDValue(); 12240 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 12241 return SDValue(); 12242 12243 // Record which element was extracted. 12244 ExtractedElements |= 12245 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 12246 12247 Uses.push_back(Extract); 12248 } 12249 12250 // If not all the elements were used, this may not be worthwhile. 12251 if (ExtractedElements != 15) 12252 return SDValue(); 12253 12254 // Ok, we've now decided to do the transformation. 12255 DebugLoc dl = InputVector.getDebugLoc(); 12256 12257 // Store the value to a temporary stack slot. 12258 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 12259 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 12260 MachinePointerInfo(), false, false, 0); 12261 12262 // Replace each use (extract) with a load of the appropriate element. 12263 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 12264 UE = Uses.end(); UI != UE; ++UI) { 12265 SDNode *Extract = *UI; 12266 12267 // Compute the element's address. 12268 SDValue Idx = Extract->getOperand(1); 12269 unsigned EltSize = 12270 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 12271 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 12272 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 12273 12274 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), 12275 StackPtr, OffsetVal); 12276 12277 // Load the scalar. 12278 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 12279 ScalarAddr, MachinePointerInfo(), 12280 false, false, 0); 12281 12282 // Replace the extract with the load. 12283 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 12284 } 12285 12286 // The replacement was made in place; don't return anything.
12287 return SDValue(); 12288} 12289 12290/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. 12291static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 12292 const X86Subtarget *Subtarget) { 12293 DebugLoc DL = N->getDebugLoc(); 12294 SDValue Cond = N->getOperand(0); 12295 // Get the LHS/RHS of the select. 12296 SDValue LHS = N->getOperand(1); 12297 SDValue RHS = N->getOperand(2); 12298 12299 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 12300 // instructions match the semantics of the common C idiom x<y?x:y but not 12301 // x<=y?x:y, because of how they handle negative zero (which can be 12302 // ignored in unsafe-math mode). 12303 if (Subtarget->hasSSE2() && 12304 (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) && 12305 Cond.getOpcode() == ISD::SETCC) { 12306 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 12307 12308 unsigned Opcode = 0; 12309 // Check for x CC y ? x : y. 12310 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 12311 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 12312 switch (CC) { 12313 default: break; 12314 case ISD::SETULT: 12315 // Converting this to a min would handle NaNs incorrectly, and swapping 12316 // the operands would cause it to handle comparisons between positive 12317 // and negative zero incorrectly. 12318 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 12319 if (!UnsafeFPMath && 12320 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 12321 break; 12322 std::swap(LHS, RHS); 12323 } 12324 Opcode = X86ISD::FMIN; 12325 break; 12326 case ISD::SETOLE: 12327 // Converting this to a min would handle comparisons between positive 12328 // and negative zero incorrectly. 12329 if (!UnsafeFPMath && 12330 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 12331 break; 12332 Opcode = X86ISD::FMIN; 12333 break; 12334 case ISD::SETULE: 12335 // Converting this to a min would handle both negative zeros and NaNs 12336 // incorrectly, but we can swap the operands to fix both. 12337 std::swap(LHS, RHS); 12338 case ISD::SETOLT: 12339 case ISD::SETLT: 12340 case ISD::SETLE: 12341 Opcode = X86ISD::FMIN; 12342 break; 12343 12344 case ISD::SETOGE: 12345 // Converting this to a max would handle comparisons between positive 12346 // and negative zero incorrectly. 12347 if (!UnsafeFPMath && 12348 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 12349 break; 12350 Opcode = X86ISD::FMAX; 12351 break; 12352 case ISD::SETUGT: 12353 // Converting this to a max would handle NaNs incorrectly, and swapping 12354 // the operands would cause it to handle comparisons between positive 12355 // and negative zero incorrectly. 12356 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 12357 if (!UnsafeFPMath && 12358 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 12359 break; 12360 std::swap(LHS, RHS); 12361 } 12362 Opcode = X86ISD::FMAX; 12363 break; 12364 case ISD::SETUGE: 12365 // Converting this to a max would handle both negative zeros and NaNs 12366 // incorrectly, but we can swap the operands to fix both. 12367 std::swap(LHS, RHS); 12368 case ISD::SETOGT: 12369 case ISD::SETGT: 12370 case ISD::SETGE: 12371 Opcode = X86ISD::FMAX; 12372 break; 12373 } 12374 // Check for x CC y ? y : x -- a min/max with reversed arms. 
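// For example, x < y ? y : x selects the larger operand, so it is a max
// even though the comparison is a less-than; the cases below account for
// that reversal.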
12375 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 12376 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 12377 switch (CC) { 12378 default: break; 12379 case ISD::SETOGE: 12380 // Converting this to a min would handle comparisons between positive 12381 // and negative zero incorrectly, and swapping the operands would 12382 // cause it to handle NaNs incorrectly. 12383 if (!UnsafeFPMath && 12384 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 12385 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 12386 break; 12387 std::swap(LHS, RHS); 12388 } 12389 Opcode = X86ISD::FMIN; 12390 break; 12391 case ISD::SETUGT: 12392 // Converting this to a min would handle NaNs incorrectly. 12393 if (!UnsafeFPMath && 12394 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 12395 break; 12396 Opcode = X86ISD::FMIN; 12397 break; 12398 case ISD::SETUGE: 12399 // Converting this to a min would handle both negative zeros and NaNs 12400 // incorrectly, but we can swap the operands to fix both. 12401 std::swap(LHS, RHS); 12402 case ISD::SETOGT: 12403 case ISD::SETGT: 12404 case ISD::SETGE: 12405 Opcode = X86ISD::FMIN; 12406 break; 12407 12408 case ISD::SETULT: 12409 // Converting this to a max would handle NaNs incorrectly. 12410 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 12411 break; 12412 Opcode = X86ISD::FMAX; 12413 break; 12414 case ISD::SETOLE: 12415 // Converting this to a max would handle comparisons between positive 12416 // and negative zero incorrectly, and swapping the operands would 12417 // cause it to handle NaNs incorrectly. 12418 if (!UnsafeFPMath && 12419 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 12420 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 12421 break; 12422 std::swap(LHS, RHS); 12423 } 12424 Opcode = X86ISD::FMAX; 12425 break; 12426 case ISD::SETULE: 12427 // Converting this to a max would handle both negative zeros and NaNs 12428 // incorrectly, but we can swap the operands to fix both. 12429 std::swap(LHS, RHS); 12430 case ISD::SETOLT: 12431 case ISD::SETLT: 12432 case ISD::SETLE: 12433 Opcode = X86ISD::FMAX; 12434 break; 12435 } 12436 } 12437 12438 if (Opcode) 12439 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 12440 } 12441 12442 // If this is a select between two integer constants, try to do some 12443 // optimizations. 12444 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 12445 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 12446 // Don't do this for crazy integer types. 12447 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 12448 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 12449 // so that TrueC (the true value) is larger than FalseC. 12450 bool NeedsCondInvert = false; 12451 12452 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 12453 // Efficiently invertible. 12454 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 12455 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 12456 isa<ConstantSDNode>(Cond.getOperand(1))))) { 12457 NeedsCondInvert = true; 12458 std::swap(TrueC, FalseC); 12459 } 12460 12461 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 12462 if (FalseC->getAPIntValue() == 0 && 12463 TrueC->getAPIntValue().isPowerOf2()) { 12464 if (NeedsCondInvert) // Invert the condition if needed. 12465 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 12466 DAG.getConstant(1, Cond.getValueType())); 12467 12468 // Zero extend the condition if needed. 
12469 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 12470 12471 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 12472 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 12473 DAG.getConstant(ShAmt, MVT::i8)); 12474 } 12475 12476 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. 12477 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 12478 if (NeedsCondInvert) // Invert the condition if needed. 12479 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 12480 DAG.getConstant(1, Cond.getValueType())); 12481 12482 // Zero extend the condition if needed. 12483 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 12484 FalseC->getValueType(0), Cond); 12485 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 12486 SDValue(FalseC, 0)); 12487 } 12488 12489 // Optimize cases that will turn into an LEA instruction. This requires 12490 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 12491 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 12492 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 12493 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 12494 12495 bool isFastMultiplier = false; 12496 if (Diff < 10) { 12497 switch ((unsigned char)Diff) { 12498 default: break; 12499 case 1: // result = add base, cond 12500 case 2: // result = lea base( , cond*2) 12501 case 3: // result = lea base(cond, cond*2) 12502 case 4: // result = lea base( , cond*4) 12503 case 5: // result = lea base(cond, cond*4) 12504 case 8: // result = lea base( , cond*8) 12505 case 9: // result = lea base(cond, cond*8) 12506 isFastMultiplier = true; 12507 break; 12508 } 12509 } 12510 12511 if (isFastMultiplier) { 12512 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 12513 if (NeedsCondInvert) // Invert the condition if needed. 12514 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 12515 DAG.getConstant(1, Cond.getValueType())); 12516 12517 // Zero extend the condition if needed. 12518 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 12519 Cond); 12520 // Scale the condition by the difference. 12521 if (Diff != 1) 12522 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 12523 DAG.getConstant(Diff, Cond.getValueType())); 12524 12525 // Add the base if non-zero. 12526 if (FalseC->getAPIntValue() != 0) 12527 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 12528 SDValue(FalseC, 0)); 12529 return Cond; 12530 } 12531 } 12532 } 12533 } 12534 12535 return SDValue(); 12536} 12537 12538/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 12539static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 12540 TargetLowering::DAGCombinerInfo &DCI) { 12541 DebugLoc DL = N->getDebugLoc(); 12542 12543 // If the flag operand isn't dead, don't touch this CMOV. 12544 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 12545 return SDValue(); 12546 12547 SDValue FalseOp = N->getOperand(0); 12548 SDValue TrueOp = N->getOperand(1); 12549 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 12550 SDValue Cond = N->getOperand(3); 12551 if (CC == X86::COND_E || CC == X86::COND_NE) { 12552 switch (Cond.getOpcode()) { 12553 default: break; 12554 case X86ISD::BSR: 12555 case X86ISD::BSF: 12556 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set. 12557 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 12558 return (CC == X86::COND_E) ?
FalseOp : TrueOp; 12559 } 12560 } 12561 12562 // If this is a select between two integer constants, try to do some 12563 // optimizations. Note that the operands are ordered the opposite of SELECT 12564 // operands. 12565 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 12566 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 12567 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 12568 // larger than FalseC (the false value). 12569 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 12570 CC = X86::GetOppositeBranchCondition(CC); 12571 std::swap(TrueC, FalseC); 12572 } 12573 12574 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 12575 // This is efficient for any integer data type (including i8/i16) and 12576 // shift amount. 12577 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 12578 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12579 DAG.getConstant(CC, MVT::i8), Cond); 12580 12581 // Zero extend the condition if needed. 12582 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 12583 12584 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 12585 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 12586 DAG.getConstant(ShAmt, MVT::i8)); 12587 if (N->getNumValues() == 2) // Dead flag value? 12588 return DCI.CombineTo(N, Cond, SDValue()); 12589 return Cond; 12590 } 12591 12592 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient 12593 // for any integer data type, including i8/i16. 12594 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 12595 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12596 DAG.getConstant(CC, MVT::i8), Cond); 12597 12598 // Zero extend the condition if needed. 12599 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 12600 FalseC->getValueType(0), Cond); 12601 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 12602 SDValue(FalseC, 0)); 12603 12604 if (N->getNumValues() == 2) // Dead flag value? 12605 return DCI.CombineTo(N, Cond, SDValue()); 12606 return Cond; 12607 } 12608 12609 // Optimize cases that will turn into an LEA instruction. This requires 12610 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 12611 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 12612 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 12613 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 12614 12615 bool isFastMultiplier = false; 12616 if (Diff < 10) { 12617 switch ((unsigned char)Diff) { 12618 default: break; 12619 case 1: // result = add base, cond 12620 case 2: // result = lea base( , cond*2) 12621 case 3: // result = lea base(cond, cond*2) 12622 case 4: // result = lea base( , cond*4) 12623 case 5: // result = lea base(cond, cond*4) 12624 case 8: // result = lea base( , cond*8) 12625 case 9: // result = lea base(cond, cond*8) 12626 isFastMultiplier = true; 12627 break; 12628 } 12629 } 12630 12631 if (isFastMultiplier) { 12632 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 12633 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12634 DAG.getConstant(CC, MVT::i8), Cond); 12635 // Zero extend the condition if needed. 12636 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 12637 Cond); 12638 // Scale the condition by the difference. 12639 if (Diff != 1) 12640 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 12641 DAG.getConstant(Diff, Cond.getValueType())); 12642 12643 // Add the base if non-zero.
12644 if (FalseC->getAPIntValue() != 0) 12645 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 12646 SDValue(FalseC, 0)); 12647 if (N->getNumValues() == 2) // Dead flag value? 12648 return DCI.CombineTo(N, Cond, SDValue()); 12649 return Cond; 12650 } 12651 } 12652 } 12653 } 12654 return SDValue(); 12655} 12656 12657 12658/// PerformMulCombine - Optimize a single multiply with a constant into two 12659/// multiplies in order to implement it with two cheaper instructions, e.g. 12660/// LEA + SHL, LEA + LEA. 12661static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 12662 TargetLowering::DAGCombinerInfo &DCI) { 12663 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 12664 return SDValue(); 12665 12666 EVT VT = N->getValueType(0); 12667 if (VT != MVT::i64) 12668 return SDValue(); 12669 12670 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 12671 if (!C) 12672 return SDValue(); 12673 uint64_t MulAmt = C->getZExtValue(); 12674 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 12675 return SDValue(); 12676 12677 uint64_t MulAmt1 = 0; 12678 uint64_t MulAmt2 = 0; 12679 if ((MulAmt % 9) == 0) { 12680 MulAmt1 = 9; 12681 MulAmt2 = MulAmt / 9; 12682 } else if ((MulAmt % 5) == 0) { 12683 MulAmt1 = 5; 12684 MulAmt2 = MulAmt / 5; 12685 } else if ((MulAmt % 3) == 0) { 12686 MulAmt1 = 3; 12687 MulAmt2 = MulAmt / 3; 12688 } 12689 if (MulAmt2 && 12690 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 12691 DebugLoc DL = N->getDebugLoc(); 12692 12693 if (isPowerOf2_64(MulAmt2) && 12694 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 12695 // If the second multiplier is a power of 2, issue it first. We want the 12696 // multiply by 3, 5, or 9 to be folded into the addressing mode unless 12697 // the lone use is an add. 12698 std::swap(MulAmt1, MulAmt2); 12699 12700 SDValue NewMul; 12701 if (isPowerOf2_64(MulAmt1)) 12702 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 12703 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 12704 else 12705 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 12706 DAG.getConstant(MulAmt1, VT)); 12707 12708 if (isPowerOf2_64(MulAmt2)) 12709 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 12710 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 12711 else 12712 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 12713 DAG.getConstant(MulAmt2, VT)); 12714 12715 // Do not add new nodes to the DAG combiner worklist. 12716 DCI.CombineTo(N, NewMul, false); 12717 } 12718 return SDValue(); 12719} 12720 12721static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 12722 SDValue N0 = N->getOperand(0); 12723 SDValue N1 = N->getOperand(1); 12724 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 12725 EVT VT = N0.getValueType(); 12726 12727 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 12728 // since the result of setcc_c is all zeros or all ones.
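// The shift on the setcc operand can be dropped because for a value x that
// is all zeros or all ones, ((x & c1) << c2) == (x & (c1 << c2)); e.g. with
// c1 = 1, c2 = 3, both sides are 8 when x == -1 and 0 when x == 0.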
12729 if (N1C && N0.getOpcode() == ISD::AND && 12730 N0.getOperand(1).getOpcode() == ISD::Constant) { 12731 SDValue N00 = N0.getOperand(0); 12732 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 12733 ((N00.getOpcode() == ISD::ANY_EXTEND || 12734 N00.getOpcode() == ISD::ZERO_EXTEND) && 12735 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 12736 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 12737 APInt ShAmt = N1C->getAPIntValue(); 12738 Mask = Mask.shl(ShAmt); 12739 if (Mask != 0) 12740 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 12741 N00, DAG.getConstant(Mask, VT)); 12742 } 12743 } 12744 12745 return SDValue(); 12746} 12747 12748/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 12749/// when possible. 12750static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 12751 const X86Subtarget *Subtarget) { 12752 EVT VT = N->getValueType(0); 12753 if (!VT.isVector() && VT.isInteger() && 12754 N->getOpcode() == ISD::SHL) 12755 return PerformSHLCombine(N, DAG); 12756 12757 // On X86 with SSE2 support, we can transform this to a vector shift if 12758 // all elements are shifted by the same amount. We can't do this in legalize 12759 // because a constant vector is typically transformed into a constant-pool 12760 // load, so we have no knowledge of the shift amount. 12761 if (!(Subtarget->hasSSE2() || Subtarget->hasAVX())) 12762 return SDValue(); 12763 12764 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16) 12765 return SDValue(); 12766 12767 SDValue ShAmtOp = N->getOperand(1); 12768 EVT EltVT = VT.getVectorElementType(); 12769 DebugLoc DL = N->getDebugLoc(); 12770 SDValue BaseShAmt = SDValue(); 12771 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 12772 unsigned NumElts = VT.getVectorNumElements(); 12773 unsigned i = 0; 12774 for (; i != NumElts; ++i) { 12775 SDValue Arg = ShAmtOp.getOperand(i); 12776 if (Arg.getOpcode() == ISD::UNDEF) continue; 12777 BaseShAmt = Arg; 12778 break; 12779 } 12780 for (; i != NumElts; ++i) { 12781 SDValue Arg = ShAmtOp.getOperand(i); 12782 if (Arg.getOpcode() == ISD::UNDEF) continue; 12783 if (Arg != BaseShAmt) { 12784 return SDValue(); 12785 } 12786 } 12787 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 12788 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 12789 SDValue InVec = ShAmtOp.getOperand(0); 12790 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 12791 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 12792 unsigned i = 0; 12793 for (; i != NumElts; ++i) { 12794 SDValue Arg = InVec.getOperand(i); 12795 if (Arg.getOpcode() == ISD::UNDEF) continue; 12796 BaseShAmt = Arg; 12797 break; 12798 } 12799 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 12800 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 12801 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 12802 if (C->getZExtValue() == SplatIdx) 12803 BaseShAmt = InVec.getOperand(1); 12804 } 12805 } 12806 if (BaseShAmt.getNode() == 0) 12807 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 12808 DAG.getIntPtrConstant(0)); 12809 } else 12810 return SDValue(); 12811 12812 // The shift amount is an i32. 12813 if (EltVT.bitsGT(MVT::i32)) 12814 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 12815 else if (EltVT.bitsLT(MVT::i32)) 12816 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 12817 12818 // The shift amount is identical so we can do a vector shift.
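// For example, (shl v4i32 x, <5, 5, 5, 5>) becomes the x86_sse2_pslli_d
// intrinsic with an i32 shift amount of 5; the switch below picks the
// matching psll/psra/psrl intrinsic for each opcode and element type.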
12819 SDValue ValOp = N->getOperand(0); 12820 switch (N->getOpcode()) { 12821 default: 12822 llvm_unreachable("Unknown shift opcode!"); 12823 break; 12824 case ISD::SHL: 12825 if (VT == MVT::v2i64) 12826 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12827 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 12828 ValOp, BaseShAmt); 12829 if (VT == MVT::v4i32) 12830 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12831 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 12832 ValOp, BaseShAmt); 12833 if (VT == MVT::v8i16) 12834 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12835 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 12836 ValOp, BaseShAmt); 12837 break; 12838 case ISD::SRA: 12839 if (VT == MVT::v4i32) 12840 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12841 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 12842 ValOp, BaseShAmt); 12843 if (VT == MVT::v8i16) 12844 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12845 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 12846 ValOp, BaseShAmt); 12847 break; 12848 case ISD::SRL: 12849 if (VT == MVT::v2i64) 12850 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12851 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 12852 ValOp, BaseShAmt); 12853 if (VT == MVT::v4i32) 12854 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12855 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 12856 ValOp, BaseShAmt); 12857 if (VT == MVT::v8i16) 12858 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 12859 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 12860 ValOp, BaseShAmt); 12861 break; 12862 } 12863 return SDValue(); 12864} 12865 12866 12867// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 12868// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 12869// and friends. Likewise for OR -> CMPNEQSS. 12870static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 12871 TargetLowering::DAGCombinerInfo &DCI, 12872 const X86Subtarget *Subtarget) { 12873 unsigned opcode; 12874 12875 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 12876 // we're requiring SSE2 for both. 12877 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 12878 SDValue N0 = N->getOperand(0); 12879 SDValue N1 = N->getOperand(1); 12880 SDValue CMP0 = N0->getOperand(1); 12881 SDValue CMP1 = N1->getOperand(1); 12882 DebugLoc DL = N->getDebugLoc(); 12883 12884 // The SETCCs should both refer to the same CMP. 
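// For example, (and (setcc COND_E (cmp x, y)), (setcc COND_NP (cmp x, y)))
// is the "ordered and equal" test, which CMPEQSS computes directly.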
12885 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 12886 return SDValue(); 12887 12888 SDValue CMP00 = CMP0->getOperand(0); 12889 SDValue CMP01 = CMP0->getOperand(1); 12890 EVT VT = CMP00.getValueType(); 12891 12892 if (VT == MVT::f32 || VT == MVT::f64) { 12893 bool ExpectingFlags = false; 12894 // Check for any users that want flags: 12895 for (SDNode::use_iterator UI = N->use_begin(), 12896 UE = N->use_end(); 12897 !ExpectingFlags && UI != UE; ++UI) 12898 switch (UI->getOpcode()) { 12899 default: 12900 case ISD::BR_CC: 12901 case ISD::BRCOND: 12902 case ISD::SELECT: 12903 ExpectingFlags = true; 12904 break; 12905 case ISD::CopyToReg: 12906 case ISD::SIGN_EXTEND: 12907 case ISD::ZERO_EXTEND: 12908 case ISD::ANY_EXTEND: 12909 break; 12910 } 12911 12912 if (!ExpectingFlags) { 12913 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 12914 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 12915 12916 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 12917 X86::CondCode tmp = cc0; 12918 cc0 = cc1; 12919 cc1 = tmp; 12920 } 12921 12922 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 12923 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 12924 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 12925 X86ISD::NodeType NTOperator = is64BitFP ? 12926 X86ISD::FSETCCsd : X86ISD::FSETCCss; 12927 // FIXME: need symbolic constants for these magic numbers. 12928 // See X86ATTInstPrinter.cpp:printSSECC(). 12929 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 12930 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 12931 DAG.getConstant(x86cc, MVT::i8)); 12932 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 12933 OnesOrZeroesF); 12934 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 12935 DAG.getConstant(1, MVT::i32)); 12936 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 12937 return OneBitOfTruth; 12938 } 12939 } 12940 } 12941 } 12942 return SDValue(); 12943} 12944 12945/// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector 12946/// so it can be folded inside ANDNP. 12947static bool CanFoldXORWithAllOnes(const SDNode *N) { 12948 EVT VT = N->getValueType(0); 12949 12950 // Match direct AllOnes for 128- and 256-bit vectors. 12951 if (ISD::isBuildVectorAllOnes(N)) 12952 return true; 12953 12954 // Look through a bit convert. 12955 if (N->getOpcode() == ISD::BITCAST) 12956 N = N->getOperand(0).getNode(); 12957 12958 // Sometimes the operand may come from an insert_subvector building a 12959 // 256-bit all-ones vector. 12960 if (VT.getSizeInBits() == 256 && 12961 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 12962 SDValue V1 = N->getOperand(0); 12963 SDValue V2 = N->getOperand(1); 12964 12965 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 12966 V1.getOperand(0).getOpcode() == ISD::UNDEF && 12967 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 12968 ISD::isBuildVectorAllOnes(V2.getNode())) 12969 return true; 12970 } 12971 12972 return false; 12973} 12974 12975static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 12976 TargetLowering::DAGCombinerInfo &DCI, 12977 const X86Subtarget *Subtarget) { 12978 if (DCI.isBeforeLegalizeOps()) 12979 return SDValue(); 12980 12981 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 12982 if (R.getNode()) 12983 return R; 12984 12985 // Want to form ANDNP nodes: 12986 // 1) In the hopes of then easily combining them with OR and AND nodes 12987 // to form PBLEND/PSIGN.
12988 // 2) To match ANDN packed intrinsics. 12989 EVT VT = N->getValueType(0); 12990 if (VT != MVT::v2i64 && VT != MVT::v4i64) 12991 return SDValue(); 12992 12993 SDValue N0 = N->getOperand(0); 12994 SDValue N1 = N->getOperand(1); 12995 DebugLoc DL = N->getDebugLoc(); 12996 12997 // Check LHS for vnot 12998 if (N0.getOpcode() == ISD::XOR && 12999 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 13000 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 13001 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 13002 13003 // Check RHS for vnot 13004 if (N1.getOpcode() == ISD::XOR && 13005 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 13006 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 13007 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 13008 13009 return SDValue(); 13010} 13011 13012static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 13013 TargetLowering::DAGCombinerInfo &DCI, 13014 const X86Subtarget *Subtarget) { 13015 if (DCI.isBeforeLegalizeOps()) 13016 return SDValue(); 13017 13018 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 13019 if (R.getNode()) 13020 return R; 13021 13022 EVT VT = N->getValueType(0); 13023 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64 && VT != MVT::v2i64) 13024 return SDValue(); 13025 13026 SDValue N0 = N->getOperand(0); 13027 SDValue N1 = N->getOperand(1); 13028 13029 // Look for psign/blend. 13030 if (Subtarget->hasSSSE3()) { 13031 if (VT == MVT::v2i64) { 13032 // Canonicalize pandn to RHS 13033 if (N0.getOpcode() == X86ISD::ANDNP) 13034 std::swap(N0, N1); 13035 // or (and (m, x), (pandn m, y)) 13036 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 13037 SDValue Mask = N1.getOperand(0); 13038 SDValue X = N1.getOperand(1); 13039 SDValue Y; 13040 if (N0.getOperand(0) == Mask) 13041 Y = N0.getOperand(1); 13042 if (N0.getOperand(1) == Mask) 13043 Y = N0.getOperand(0); 13044 13045 // Check that the mask appeared in both the AND and the ANDNP. 13046 if (!Y.getNode()) 13047 return SDValue(); 13048 13049 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 13050 if (Mask.getOpcode() != ISD::BITCAST || 13051 X.getOpcode() != ISD::BITCAST || 13052 Y.getOpcode() != ISD::BITCAST) 13053 return SDValue(); 13054 13055 // Look through mask bitcast. 13056 Mask = Mask.getOperand(0); 13057 EVT MaskVT = Mask.getValueType(); 13058 13059 // Validate that the Mask operand is a vector sra node. The sra node 13060 // will be an intrinsic. 13061 if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN) 13062 return SDValue(); 13063 13064 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 13065 // there is no psrai.b 13066 switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) { 13067 case Intrinsic::x86_sse2_psrai_w: 13068 case Intrinsic::x86_sse2_psrai_d: 13069 break; 13070 default: return SDValue(); 13071 } 13072 13073 // Check that the SRA is all signbits. 13074 SDValue SraC = Mask.getOperand(2); 13075 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 13076 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 13077 if ((SraAmt + 1) != EltBits) 13078 return SDValue(); 13079 13080 DebugLoc DL = N->getDebugLoc(); 13081 13082 // Now we know we at least have a pblendvb with the mask val. See if 13083 // we can form a psignb/w/d.
13084 // psign = x.type == y.type == mask.type && y = sub(0, x); 13085 X = X.getOperand(0); 13086 Y = Y.getOperand(0); 13087 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 13088 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 13089 X.getValueType() == MaskVT && X.getValueType() == Y.getValueType()){ 13090 unsigned Opc = 0; 13091 switch (EltBits) { 13092 case 8: Opc = X86ISD::PSIGNB; break; 13093 case 16: Opc = X86ISD::PSIGNW; break; 13094 case 32: Opc = X86ISD::PSIGND; break; 13095 default: break; 13096 } 13097 if (Opc) { 13098 SDValue Sign = DAG.getNode(Opc, DL, MaskVT, X, Mask.getOperand(1)); 13099 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Sign); 13100 } 13101 } 13102 // PBLENDVB only available on SSE 4.1 13103 if (!Subtarget->hasSSE41()) 13104 return SDValue(); 13105 13106 X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X); 13107 Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y); 13108 Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask); 13109 Mask = DAG.getNode(X86ISD::PBLENDVB, DL, MVT::v16i8, X, Y, Mask); 13110 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Mask); 13111 } 13112 } 13113 } 13114 13115 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 13116 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 13117 std::swap(N0, N1); 13118 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 13119 return SDValue(); 13120 if (!N0.hasOneUse() || !N1.hasOneUse()) 13121 return SDValue(); 13122 13123 SDValue ShAmt0 = N0.getOperand(1); 13124 if (ShAmt0.getValueType() != MVT::i8) 13125 return SDValue(); 13126 SDValue ShAmt1 = N1.getOperand(1); 13127 if (ShAmt1.getValueType() != MVT::i8) 13128 return SDValue(); 13129 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 13130 ShAmt0 = ShAmt0.getOperand(0); 13131 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 13132 ShAmt1 = ShAmt1.getOperand(0); 13133 13134 DebugLoc DL = N->getDebugLoc(); 13135 unsigned Opc = X86ISD::SHLD; 13136 SDValue Op0 = N0.getOperand(0); 13137 SDValue Op1 = N1.getOperand(0); 13138 if (ShAmt0.getOpcode() == ISD::SUB) { 13139 Opc = X86ISD::SHRD; 13140 std::swap(Op0, Op1); 13141 std::swap(ShAmt0, ShAmt1); 13142 } 13143 13144 unsigned Bits = VT.getSizeInBits(); 13145 if (ShAmt1.getOpcode() == ISD::SUB) { 13146 SDValue Sum = ShAmt1.getOperand(0); 13147 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 13148 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 13149 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 13150 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 13151 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 13152 return DAG.getNode(Opc, DL, VT, 13153 Op0, Op1, 13154 DAG.getNode(ISD::TRUNCATE, DL, 13155 MVT::i8, ShAmt0)); 13156 } 13157 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 13158 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 13159 if (ShAmt0C && 13160 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 13161 return DAG.getNode(Opc, DL, VT, 13162 N0.getOperand(0), N1.getOperand(0), 13163 DAG.getNode(ISD::TRUNCATE, DL, 13164 MVT::i8, ShAmt0)); 13165 } 13166 13167 return SDValue(); 13168} 13169 13170/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 
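/// Currently this splits a 256-bit store of a two-operand CONCAT_VECTORS
/// into two 128-bit stores, narrows truncating vector stores via a shuffle,
/// and turns 64-bit integer/MMX load-store pairs into FP or GPR operations.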
13171static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 13172 const X86Subtarget *Subtarget) { 13173 StoreSDNode *St = cast<StoreSDNode>(N); 13174 EVT VT = St->getValue().getValueType(); 13175 EVT StVT = St->getMemoryVT(); 13176 DebugLoc dl = St->getDebugLoc(); 13177 SDValue StoredVal = St->getOperand(1); 13178 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13179 13180 // If we are saving a concatenation of two XMM registers, perform two stores. 13181 // This is better on Sandy Bridge because one 256-bit mem op is done via two 13182 // 128-bit ones. If in the future the cost becomes only one memory access, 13183 // the first version would be better. 13184 if (VT.getSizeInBits() == 256 && 13185 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 13186 StoredVal.getNumOperands() == 2) { 13187 13188 SDValue Value0 = StoredVal.getOperand(0); 13189 SDValue Value1 = StoredVal.getOperand(1); 13190 13191 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 13192 SDValue Ptr0 = St->getBasePtr(); 13193 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 13194 13195 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 13196 St->getPointerInfo(), St->isVolatile(), 13197 St->isNonTemporal(), St->getAlignment()); 13198 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 13199 St->getPointerInfo(), St->isVolatile(), 13200 St->isNonTemporal(), St->getAlignment()); 13201 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 13202 } 13203 13204 // Optimize trunc store (of multiple scalars) to shuffle and store. 13205 // First, pack all of the elements in one place. Next, store to memory 13206 // in fewer chunks. 13207 if (St->isTruncatingStore() && VT.isVector()) { 13208 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13209 unsigned NumElems = VT.getVectorNumElements(); 13210 assert(StVT != VT && "Cannot truncate to the same type"); 13211 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 13212 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 13213 13214 // The From/To sizes and the element count must be powers of two. 13215 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 13216 // We are going to use the original vector elt for storing. 13217 // The accumulated smaller vector elements must be a multiple of the bigger size. 13218 if (0 != (NumElems * ToSz) % FromSz) return SDValue(); 13219 unsigned SizeRatio = FromSz / ToSz; 13220 13221 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 13222 13223 // Create a type on which we perform the shuffle. 13224 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 13225 StVT.getScalarType(), NumElems*SizeRatio); 13226 13227 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 13228 13229 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 13230 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 13231 for (unsigned i = 0; i < NumElems; i++ ) ShuffleVec[i] = i * SizeRatio; 13232 13233 // Can't shuffle using an illegal type. 13234 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 13235 13236 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 13237 DAG.getUNDEF(WideVec.getValueType()), 13238 ShuffleVec.data()); 13239 // At this point all of the data is stored at the bottom of the 13240 // register. We now need to save it to mem.
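// For example, for a truncating store of v8i16 to v8i8 this bitcasts to
// v16i8 and shuffles to <0, 2, 4, 6, 8, 10, 12, 14, u, ...>, so the eight
// truncated bytes end up contiguous in the low half of the register.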
13241 13242 // Find the largest store unit 13243 MVT StoreType = MVT::i8; 13244 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 13245 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 13246 MVT Tp = (MVT::SimpleValueType)tp; 13247 if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz) 13248 StoreType = Tp; 13249 } 13250 13251 // Bitcast the original vector into a vector of store-size units 13252 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 13253 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 13254 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 13255 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 13256 SmallVector<SDValue, 8> Chains; 13257 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 13258 TLI.getPointerTy()); 13259 SDValue Ptr = St->getBasePtr(); 13260 13261 // Perform one or more big stores into memory. 13262 for (unsigned i = 0; i < (ToSz*NumElems)/StoreType.getSizeInBits() ; i++) { 13263 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 13264 StoreType, ShuffWide, 13265 DAG.getIntPtrConstant(i)); 13266 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 13267 St->getPointerInfo(), St->isVolatile(), 13268 St->isNonTemporal(), St->getAlignment()); 13269 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 13270 Chains.push_back(Ch); 13271 } 13272 13273 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 13274 Chains.size()); 13275 } 13276 13277 13278 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 13279 // the FP state in cases where an emms may be missing. 13280 // A preferable solution to the general problem is to figure out the right 13281 // places to insert EMMS. This qualifies as a quick hack. 13282 13283 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 13284 if (VT.getSizeInBits() != 64) 13285 return SDValue(); 13286 13287 const Function *F = DAG.getMachineFunction().getFunction(); 13288 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); 13289 bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps 13290 && Subtarget->hasSSE2(); 13291 if ((VT.isVector() || 13292 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 13293 isa<LoadSDNode>(St->getValue()) && 13294 !cast<LoadSDNode>(St->getValue())->isVolatile() && 13295 St->getChain().hasOneUse() && !St->isVolatile()) { 13296 SDNode* LdVal = St->getValue().getNode(); 13297 LoadSDNode *Ld = 0; 13298 int TokenFactorIndex = -1; 13299 SmallVector<SDValue, 8> Ops; 13300 SDNode* ChainVal = St->getChain().getNode(); 13301 // Must be a store of a load. We currently handle two cases: the load 13302 // is a direct child, and it's under an intervening TokenFactor. It is 13303 // possible to dig deeper under nested TokenFactors. 13304 if (ChainVal == LdVal) 13305 Ld = cast<LoadSDNode>(St->getChain()); 13306 else if (St->getValue().hasOneUse() && 13307 ChainVal->getOpcode() == ISD::TokenFactor) { 13308 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 13309 if (ChainVal->getOperand(i).getNode() == LdVal) { 13310 TokenFactorIndex = i; 13311 Ld = cast<LoadSDNode>(St->getValue()); 13312 } else 13313 Ops.push_back(ChainVal->getOperand(i)); 13314 } 13315 } 13316 13317 if (!Ld || !ISD::isNormalLoad(Ld)) 13318 return SDValue(); 13319 13320 // If this is not the MMX case, i.e. we are just turning i64 load/store 13321 // into f64 load/store, avoid the transformation if there are multiple 13322 // uses of the loaded value. 
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    DebugLoc LdDL = Ld->getDebugLoc();
    DebugLoc StDL = N->getDebugLoc();
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getPointerInfo(), Ld->isVolatile(),
                                  Ld->isNonTemporal(), Ld->getAlignment());
      SDValue NewChain = NewLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
                               Ops.size());
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
                          St->getPointerInfo(),
                          St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
                             Ops.size());
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }
  return SDValue();
}

/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}

static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
  }
  return SDValue();
}

static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  DebugLoc dl = N->getDebugLoc();
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() != X86ISD::SETCC_CARRY)
      return SDValue();
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C || C->getZExtValue() != 1)
      return SDValue();
    return DAG.getNode(ISD::AND, dl, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                   N00.getOperand(0), N00.getOperand(1)),
                       DAG.getConstant(1, VT));
  }

  return SDValue();
}

// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned X86CC = N->getConstantOperandVal(0);
  SDValue EFLAG = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();

  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in some
  // cases.
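  // SETCC_CARRY yields all-ones when the carry flag is set, so masking the
  // result with 1 below recovers the usual 0/1 setcc value on demand.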
  if (X86CC == X86::COND_B)
    return DAG.getNode(ISD::AND, DL, MVT::i8,
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(X86CC, MVT::i8), EFLAG),
                       DAG.getConstant(1, MVT::i8));

  return SDValue();
}

static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86TargetLowering *XTLI) {
  SDValue Op0 = N->getOperand(0);
  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT VT = Ld->getValueType(0);
    if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !XTLI->getSubtarget()->is64Bit() &&
        !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
      SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
                                          Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}

// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this
      // when the flags result is dead right now.
      SDValue(N, 1).use_empty()) {
    DebugLoc DL = N->getDebugLoc();
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B,MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}

// fold (add Y, (sete  X, 0)) -> adc  0, Y
//      (add Y, (setne X, 0)) -> sbb -1, Y
//      (sub (sete  X, 0), Y) -> sbb  0, Y
//      (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  DebugLoc DL = N->getDebugLoc();

  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();

  SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = SetCC.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue CmpOp0 = Cmp.getOperand(0);
  SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
                               DAG.getConstant(1, CmpOp0.getValueType()));

  SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
  if (CC == X86::COND_NE)
    return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                       DL, OtherVal.getValueType(), OtherVal,
                       DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
  return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}

static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, VT));
      return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue()+1, VT));
    }
  }

  return OptimizeConditionalInDecrement(N, DAG);
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI);
  case ISD::ADD:            return OptimizeConditionalInDecrement(N, DAG);
  case ISD::SUB:            return PerformSubCombine(N, DAG);
  case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, Subtarget);
  case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, this);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG);
  case X86ISD::SHUFPS:      // Handle all target specific shuffles
  case X86ISD::SHUFPD:
  case X86ISD::PALIGN:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::VUNPCKHPSY:
  case X86ISD::VUNPCKHPDY:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::VUNPCKLPSY:
  case X86ISD::VUNPCKLPDY:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILPS:
  case X86ISD::VPERMILPSY:
  case X86ISD::VPERMILPD:
  case X86ISD::VPERMILPDY:
  case X86ISD::VPERM2F128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then
    // it might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is live-out.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
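    // Promoting to i32 here would force a load that could otherwise be
    // folded into the i16 instruction (or an op foldable into a store) to
    // be emitted separately.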
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t");  // Split with whitespace.

    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (AsmPieces.size() == 2 &&
        (AsmPieces[0] == "bswap" ||
         AsmPieces[0] == "bswapq" ||
         AsmPieces[0] == "bswapl") &&
        (AsmPieces[1] == "$0" ||
         AsmPieces[1] == "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (!Ty || Ty->getBitWidth() % 16 != 0)
        return false;
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        AsmPieces.size() == 3 &&
        (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") &&
        AsmPieces[1] == "$$8," &&
        AsmPieces[2] == "${0:w}" &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      std::sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}") {
        IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
        if (!Ty || Ty->getBitWidth() % 16 != 0)
          return false;
        return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
      SmallVector<StringRef, 4> Words;
      SplitString(AsmPieces[0], Words, " \t,");
      if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
          Words[2] == "${0:w}") {
        Words.clear();
        SplitString(AsmPieces[1], Words, " \t,");
        if (Words.size() == 3 && Words[0] == "rorl" && Words[1] == "$$16" &&
            Words[2] == "$0") {
          Words.clear();
          SplitString(AsmPieces[2], Words, " \t,");
          if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
              Words[2] == "${0:w}") {
            AsmPieces.clear();
            const std::string &ConstraintsStr = IA->getConstraintString();
            SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
            std::sort(AsmPieces.begin(), AsmPieces.end());
            if (AsmPieces.size() == 4 &&
                AsmPieces[0] == "~{cc}" &&
                AsmPieces[1] == "~{dirflag}" &&
                AsmPieces[2] == "~{flags}" &&
                AsmPieces[3] == "~{fpsr}") {
              IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
              if (!Ty || Ty->getBitWidth() % 16 != 0)
                return false;
              return IntrinsicLowering::LowerToByteSwap(CI);
            }
          }
        }
      }
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        SmallVector<StringRef, 4> Words;
        SplitString(AsmPieces[0], Words, " \t");
        if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
          Words.clear();
          SplitString(AsmPieces[1], Words, " \t");
          if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
            Words.clear();
            SplitString(AsmPieces[2], Words, " \t,");
            if (Words.size() == 3 && Words[0] == "xchgl" &&
                Words[1] == "%eax" && Words[2] == "%edx") {
              IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
              if (!Ty || Ty->getBitWidth() % 16 != 0)
                return false;
              return IntrinsicLowering::LowerToByteSwap(CI);
            }
          }
        }
      }
    }
    break;
  }
  return false;
}



/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
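  // (A larger weight marks this constraint alternative as a better match
  // for the operand.)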
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x':
  case 'Y':
    if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasXMM())
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasXMMInt())
      return "Y";
    if (Subtarget->hasXMM())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    // 8-bit signed value.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in
      // certain memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, X86::GR32RegisterClass);
        else if (VT == MVT::i16)
          return std::make_pair(0U, X86::GR16RegisterClass);
        else if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, X86::GR8RegisterClass);
        else if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, X86::GR64RegisterClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, X86::GR32_ABCDRegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_ABCDRegisterClass);
      else if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, X86::GR64_ABCDRegisterClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
      return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasXMMInt()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasXMM()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map st(0) .. st(7) -> ST0 .. ST7.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::ST0 + Constraint[4] - '0';
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = X86::CCRRegisterClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GR32_ADRegisterClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
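  // For example, "{ax}" with an i8 operand becomes AL, and with an i64
  // operand becomes RAX.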
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}