Lines matching refs:VT. Each entry below gives the source line number and the text of a matching line in the ARMTargetLowering implementation.

92 void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
94 if (VT != PromotedLdStVT) {
95 setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
96 AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
99 setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
100 AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
104 EVT ElemTy = VT.getVectorElementType();
106 setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
107 setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
108 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
110 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Custom);
111 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Custom);
112 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
113 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
115 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
116 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
117 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
118 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
120 setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
121 setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
122 setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
123 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
124 setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
125 setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
126 setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
127 if (VT.isInteger()) {
128 setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
129 setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
130 setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
134 if (VT.isInteger() && VT != PromotedBitwiseVT) {
135 setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
136 AddPromotedToType (ISD::AND, VT.getSimpleVT(),
138 setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
139 AddPromotedToType (ISD::OR, VT.getSimpleVT(),
141 setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
142 AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
147 setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
148 setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
149 setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
150 setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
151 setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
152 setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
155 void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
156 addRegisterClass(VT, ARM::DPRRegisterClass);
157 addTypeForNEON(VT, MVT::f64, MVT::v2i32);
160 void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
161 addRegisterClass(VT, ARM::QPRRegisterClass);
162 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
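The matches from source lines 92-162 are the addTypeForNEON / addDRTypeForNEON / addQRTypeForNEON helpers: each NEON vector type is given a register class, and the per-(opcode, value type) action table is then filled with Legal, Promote, Expand, or Custom entries, with Promote entries paired to a destination type via AddPromotedToType. Below is a minimal standalone model of such a table; the names and the std::map storage are illustrative only and are not how LLVM actually stores these actions.

  #include <cstdio>
  #include <map>
  #include <utility>

  // Minimal standalone model of the (opcode, value type) action table that the
  // setOperationAction / AddPromotedToType calls above populate.  Illustrative
  // only; LLVM keeps these tables in fixed-size arrays inside the target
  // lowering object.
  enum Action { Legal, Promote, Expand, Custom };

  struct OpActionTable {
    std::map<std::pair<int, int>, Action> Actions;  // (opcode, VT) -> action
    std::map<std::pair<int, int>, int> PromoteTo;   // (opcode, VT) -> promoted VT

    void setOperationAction(int Op, int VT, Action A) { Actions[{Op, VT}] = A; }
    void addPromotedToType(int Op, int VT, int DestVT) { PromoteTo[{Op, VT}] = DestVT; }
    Action getOperationAction(int Op, int VT) const {
      auto I = Actions.find({Op, VT});
      return I == Actions.end() ? Legal : I->second;  // untouched entries stay Legal
    }
  };

  int main() {
    enum { ISD_AND, ISD_LOAD };  // stand-in opcodes
    enum { v8i8, v2i32, f64 };   // stand-in value types
    OpActionTable T;
    // Mirrors the addDRTypeForNEON pattern above: bitwise ops on a 64-bit
    // vector are promoted to v2i32, loads and stores to f64.
    T.setOperationAction(ISD_AND, v8i8, Promote);
    T.addPromotedToType(ISD_AND, v8i8, v2i32);
    T.setOperationAction(ISD_LOAD, v8i8, Promote);
    T.addPromotedToType(ISD_LOAD, v8i8, f64);
    std::printf("AND on v8i8 -> action %d\n", T.getOperationAction(ISD_AND, v8i8));
    return 0;
  }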
446 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
447 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
450 setTruncStoreAction((MVT::SimpleValueType)VT,
452 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
453 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
454 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
841 ARMTargetLowering::findRepresentativeClass(EVT VT) const{
844 switch (VT.getSimpleVT().SimpleTy) {
846 return TargetLowering::findRepresentativeClass(VT);
1017 EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
1018 if (!VT.isVector()) return getPointerTy();
1019 return VT.changeVectorElementTypeToInteger();
1024 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
1029 if (VT == MVT::v4i64)
1031 else if (VT == MVT::v8i64)
1034 return TargetLowering::getRegClassFor(VT);
1055 EVT VT = N->getValueType(i);
1056 if (VT == MVT::Glue || VT == MVT::Other)
1058 if (VT.isFloatingPoint() || VT.isVector())
2859 EVT VT = Op.getValueType();
2863 assert(True.getValueType() == VT);
2864 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
2880 EVT VT = Op.getValueType();
2892 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
2901 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
2907 Result = DAG.getNode(ARMISD::CMOV, dl, VT,
2923 EVT VT = Op.getValueType();
2924 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
3115 EVT VT = Op.getValueType();
3126 if (VT != MVT::v4i16)
3130 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
3134 EVT VT = Op.getValueType();
3135 if (VT.isVector())
3155 EVT VT = Op.getValueType();
3159 if (VT.getVectorElementType() == MVT::f32)
3166 if (VT != MVT::v4f32)
3184 return DAG.getNode(Opc, dl, VT, Op);
3188 EVT VT = Op.getValueType();
3189 if (VT.isVector())
3206 return DAG.getNode(Opc, dl, VT, Op);
3214 EVT VT = Op.getValueType();
3225 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
3226 if (VT == MVT::f64)
3230 else /*if (VT == MVT::f32)*/
3234 if (VT == MVT::f64)
3238 } else if (VT == MVT::f32)
3254 if (VT == MVT::f32) {
3275 if (VT == MVT::f32) {
3296 EVT VT = Op.getValueType();
3302 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
3303 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
3309 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
3316 EVT VT = Op.getValueType();
3321 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
3323 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
3373 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
3374 assert(VT.isVector() && "Expected a vector type");
3377 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
3379 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3387 EVT VT = Op.getValueType();
3388 unsigned VTBits = VT.getSizeInBits();
3400 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
3403 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
3404 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3405 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
3410 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
3411 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
3423 EVT VT = Op.getValueType();
3424 unsigned VTBits = VT.getSizeInBits();
3434 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
3437 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
3438 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
3440 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3444 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
3445 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
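The matches from source lines 3387-3445 come from the custom lowering of 64-bit shifts: the value is split across two 32-bit halves, the small-shift result is built from an SRL/SHL pair ORed together, and an ARMISD::CMOV selects between that and the large-shift value. A standalone sketch of the low-half computation for a logical right shift, assuming 32-bit halves (the helper name is illustrative):

  #include <cassert>
  #include <cstdint>

  // Sketch of the lo-half computation behind the SRL/SHL/OR + CMOV pattern
  // above, assuming a 64-bit value split into two 32-bit halves.  The DAG
  // version selects between the two cases with ARMISD::CMOV instead of an if.
  uint32_t lshr64_lo(uint32_t Lo, uint32_t Hi, unsigned Amt) {
    assert(Amt < 64 && "shift amount out of range");
    if (Amt == 0)
      return Lo;                              // avoid the undefined 32-bit shift by 32
    if (Amt >= 32)
      return Hi >> (Amt - 32);                // "TrueVal": ExtraShAmt = Amt - 32
    return (Lo >> Amt) | (Hi << (32 - Amt));  // "FalseVal": OR of the SRL and SHL parts
  }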
3472 EVT VT = N->getValueType(0);
3478 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
3479 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
3484 EVT VT = N->getValueType(0);
3487 if (!VT.isVector())
3495 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3512 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
3519 EVT VT = N->getValueType(0);
3523 if (VT != MVT::i64)
3564 EVT VT = Op.getValueType();
3593 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3594 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
3602 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
3603 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
3637 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
3638 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
3664 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
3666 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
3668 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
3670 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
3672 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
3674 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3677 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3681 Result = DAG.getNOT(dl, Result, VT);
3691 EVT &VT, bool is128Bits, NEONModImmType type) {
3710 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3715 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3735 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3810 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
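The matches from source lines 3691-3810 are from the NEON modified-immediate helper, which reports the vector type implied by the splat element size. A sketch of that mapping, returning a type name string purely for illustration (the real helper writes an MVT into the by-reference VT parameter):

  // Sketch of the splat-size -> reported vector type mapping visible above
  // (source lines 3710, 3715, 3735, 3810).
  const char *neonModImmVT(unsigned SplatBitSize, bool is128Bits) {
    switch (SplatBitSize) {
    case 8:  return is128Bits ? "v16i8" : "v8i8";
    case 16: return is128Bits ? "v8i16" : "v4i16";
    case 32: return is128Bits ? "v4i32" : "v2i32";
    case 64: return is128Bits ? "v2i64" : "v1i64";
    default: return nullptr;  // not a NEON modified-immediate splat size
    }
  }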
3874 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
3876 unsigned NumElts = VT.getVectorNumElements();
3913 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
3917 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3921 unsigned NumElts = VT.getVectorNumElements();
3939 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
3943 return VT == MVT::v8i8 && M.size() == 8;
3946 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
3947 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3951 unsigned NumElts = VT.getVectorNumElements();
3964 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
3965 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3969 unsigned NumElts = VT.getVectorNumElements();
3979 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
3980 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
3984 unsigned NumElts = VT.getVectorNumElements();
3993 if (VT.is64BitVector() && EltSz == 32)
4002 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
4003 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4007 unsigned Half = VT.getVectorNumElements() / 2;
4020 if (VT.is64BitVector() && EltSz == 32)
4026 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
4027 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4031 unsigned NumElts = VT.getVectorNumElements();
4042 if (VT.is64BitVector() && EltSz == 32)
4051 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
4052 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4056 unsigned NumElts = VT.getVectorNumElements();
4067 if (VT.is64BitVector() && EltSz == 32)
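Source lines 3874-4067 are the shuffle-mask classifiers used by isShuffleMaskLegal and the VECTOR_SHUFFLE lowering below: each helper recognizes the constant mask pattern of one NEON permute instruction (VEXT, VREV, VTBL, VTRN, VUZP, VZIP). Standalone sketches of two of them over a plain mask array follow; -1 marks an undefined lane and matches anything, and the helper names are not LLVM's:

  #include <vector>

  // VREV pattern: lanes are reversed within each block of BlockBits bits.
  bool looksLikeVREVMask(const std::vector<int> &M, unsigned EltBits,
                         unsigned BlockBits) {
    if (BlockBits % EltBits != 0)
      return false;
    unsigned BlockElts = BlockBits / EltBits;
    for (unsigned i = 0, e = M.size(); i != e; ++i) {
      if (M[i] < 0)
        continue;                             // undefined lane matches anything
      unsigned Block = i / BlockElts, Lane = i % BlockElts;
      if ((unsigned)M[i] != Block * BlockElts + (BlockElts - 1 - Lane))
        return false;
    }
    return true;
  }

  // VEXT pattern: lanes are consecutive starting at offset Imm, reading past
  // the end of the first source vector into the second.  This sketch covers
  // only the form that does not swap the two source operands.
  bool looksLikeVEXTMask(const std::vector<int> &M, unsigned NumElts,
                         unsigned &Imm) {
    unsigned i = 0, e = M.size();
    while (i != e && M[i] < 0)
      ++i;                                    // skip leading undefined lanes
    if (i == e)
      return false;
    int Start = M[i] - (int)i;
    if (Start < 0 || (unsigned)Start >= NumElts)
      return false;
    for (; i != e; ++i)
      if (M[i] >= 0 && M[i] != Start + (int)i)
        return false;
    Imm = (unsigned)Start;
    return true;
  }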
4099 EVT VT = Op.getValueType();
4110 DAG, VmovVT, VT.is128BitVector(),
4114 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4121 DAG, VmovVT, VT.is128BitVector(),
4125 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4129 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
4133 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
4140 unsigned NumElts = VT.getVectorNumElements();
4161 return DAG.getUNDEF(VT);
4164 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
4166 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4172 return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
4173 if (VT.getVectorElementType().isFloatingPoint()) {
4182 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4186 return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
4214 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4225 EVT VT = Op.getValueType();
4226 unsigned NumElts = VT.getVectorNumElements();
4241 VT.getVectorElementType()) {
4278 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
4284 if (SourceVecs[i].getValueType() == VT) {
4308 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4314 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4320 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4323 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
4326 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
4351 if (isShuffleMaskLegal(Mask, VT))
4352 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
4364 EVT VT) const {
4365 if (VT.getVectorNumElements() == 4 &&
4366 (VT.is128BitVector() || VT.is64BitVector())) {
4388 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4390 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
4391 isVREVMask(M, VT, 64) ||
4392 isVREVMask(M, VT, 32) ||
4393 isVREVMask(M, VT, 16) ||
4394 isVEXTMask(M, VT, ReverseVEXT, Imm) ||
4395 isVTBLMask(M, VT) ||
4396 isVTRNMask(M, VT, WhichResult) ||
4397 isVUZPMask(M, VT, WhichResult) ||
4398 isVZIPMask(M, VT, WhichResult) ||
4399 isVTRN_v_undef_Mask(M, VT, WhichResult) ||
4400 isVUZP_v_undef_Mask(M, VT, WhichResult) ||
4401 isVZIP_v_undef_Mask(M, VT, WhichResult));
4440 EVT VT = OpLHS.getValueType();
4446 if (VT.getVectorElementType() == MVT::i32 ||
4447 VT.getVectorElementType() == MVT::f32)
4448 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
4450 if (VT.getVectorElementType() == MVT::i16)
4451 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
4453 assert(VT.getVectorElementType() == MVT::i8);
4454 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
4459 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
4464 return DAG.getNode(ARMISD::VEXT, dl, VT,
4469 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4473 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4477 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4509 EVT VT = Op.getValueType();
4520 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4522 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
4529 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
4543 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
4545 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
4551 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
4554 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
4558 if (isVREVMask(ShuffleMask, VT, 64))
4559 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
4560 if (isVREVMask(ShuffleMask, VT, 32))
4561 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
4562 if (isVREVMask(ShuffleMask, VT, 16))
4563 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
4571 if (isVTRNMask(ShuffleMask, VT, WhichResult))
4572 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4574 if (isVUZPMask(ShuffleMask, VT, WhichResult))
4575 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4577 if (isVZIPMask(ShuffleMask, VT, WhichResult))
4578 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4581 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
4582 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4584 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
4585 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4587 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
4588 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4594 unsigned NumElts = VT.getVectorNumElements();
4633 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
4636 if (VT == MVT::v8i8) {
4696 EVT VT = N->getValueType(0);
4697 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
4727 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4786 EVT VT = N->getValueType(0);
4787 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
4788 unsigned NumElts = VT.getVectorNumElements();
4825 EVT VT = Op.getValueType();
4826 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL");
4857 if (VT == MVT::v2i64)
4875 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
4889 return DAG.getNode(N0->getOpcode(), DL, VT,
4890 DAG.getNode(NewOpc, DL, VT,
4892 DAG.getNode(NewOpc, DL, VT,
4963 EVT VT = Op.getValueType();
4964 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
4972 if (VT == MVT::v8i8) {
4998 EVT VT = Op.getValueType();
4999 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
5007 if (VT == MVT::v8i8) {
5072 EVT VT = Op.getNode()->getValueType(0);
5073 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
6654 EVT VT = N->getValueType(0);
6695 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
6703 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
6723 EVT VT = N->getValueType(0);
6724 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
6784 unsigned numElem = VT.getVectorNumElements();
6785 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
6795 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
6879 EVT VT = N->getValueType(0);
6883 return DAG.getNode(Opcode, DL, VT,
6884 DAG.getNode(ISD::MUL, DL, VT, N00, N1),
6885 DAG.getNode(ISD::MUL, DL, VT, N01, N1));
6899 EVT VT = N->getValueType(0);
6900 if (VT.is64BitVector() || VT.is128BitVector())
6902 if (VT != MVT::i32)
6922 Res = DAG.getNode(ISD::ADD, DL, VT,
6924 DAG.getNode(ISD::SHL, DL, VT,
6930 Res = DAG.getNode(ISD::SUB, DL, VT,
6931 DAG.getNode(ISD::SHL, DL, VT,
6942 Res = DAG.getNode(ISD::SUB, DL, VT,
6944 DAG.getNode(ISD::SHL, DL, VT,
6950 Res = DAG.getNode(ISD::ADD, DL, VT,
6952 DAG.getNode(ISD::SHL, DL, VT,
6956 Res = DAG.getNode(ISD::SUB, DL, VT,
6964 Res = DAG.getNode(ISD::SHL, DL, VT,
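The ADD/SUB-of-SHL nodes built at source lines 6922-6964 are the classic strength reduction of a multiply by a constant near a power of two. The two basic identities behind them, as a standalone sketch:

  #include <cstdint>

  // Multiplying by (2^N + 1) or (2^N - 1) is rewritten as a shift combined
  // with an add or a subtract, matching the SHL/ADD and SHL/SUB nodes above.
  uint32_t mulByPow2Plus1(uint32_t X, unsigned N)  { return (X << N) + X; }
  uint32_t mulByPow2Minus1(uint32_t X, unsigned N) { return (X << N) - X; }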
7022 EVT VT = N->getValueType(0);
7025 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
7037 DAG, VbicVT, VT.is128BitVector(),
7043 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
7065 EVT VT = N->getValueType(0);
7068 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
7080 DAG, VorrVT, VT.is128BitVector(),
7086 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
7104 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
7105 DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
7120 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
7124 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
7147 if (VT != MVT::i32)
7173 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
7199 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
7201 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
7215 Res = DAG.getNode(ISD::SRL, DL, VT, N00,
7217 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
7236 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
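Source lines 7147-7236 fold AND/OR combinations into ARMISD::BFI nodes. Since BFI can only insert a single contiguous run of bits, a combine of this kind has to verify that the constant mask is a shifted run of ones; a standalone sketch of such a check follows (the helper name is illustrative, not the one used in the file):

  #include <cstdint>

  // True if Mask is a single contiguous run of ones; reports the run's
  // position and width, which map onto the lsb/width operands of a
  // bitfield-insert.
  bool isContiguousOnes(uint32_t Mask, unsigned &LSB, unsigned &Width) {
    if (Mask == 0)
      return false;
    LSB = 0;
    while ((Mask & 1) == 0) {                 // strip trailing zeros
      Mask >>= 1;
      ++LSB;
    }
    Width = 0;
    while (Mask & 1) {                        // count the run of ones
      Mask >>= 1;
      ++Width;
    }
    return Mask == 0;                         // nothing may remain above the run
  }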
7249 EVT VT = N->getValueType(0);
7252 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
7360 EVT VT = StVal.getValueType();
7361 if (St->isTruncatingStore() && VT.isVector()) {
7365 unsigned NumElems = VT.getVectorNumElements();
7366 assert(StVT != VT && "Cannot truncate to the same type");
7367 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
7378 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
7383 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
7413 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
7414 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
7518 EVT VT = N->getValueType(0);
7519 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
7523 unsigned NumElts = VT.getVectorNumElements();
7532 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
7541 EVT VT = N->getValueType(0);
7543 if (VT.getVectorElementType() != MVT::i64 ||
7550 VT.getVectorNumElements());
7558 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
7588 EVT VT = N->getValueType(0);
7589 if (!TLI.isTypeLegal(VT) ||
7594 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
7598 unsigned NumElts = VT.getVectorNumElements();
7610 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
7611 DAG.getUNDEF(VT), NewMask.data());
7752 EVT VT = N->getValueType(0);
7754 if (!VT.is64BitVector())
7796 Tys[n] = VT;
7852 EVT VT = N->getValueType(0);
7853 if (EltSize > VT.getVectorElementType().getSizeInBits())
7856 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
7980 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
7981 assert(VT.isVector() && "vector shift count is not a vector type");
7982 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
7994 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
7996 assert(VT.isVector() && "vector shift count is not a vector type");
7997 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
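The helpers matched at source lines 7980-7997 validate constant vector shift amounts against the element width before immediate-shift nodes are formed. A sketch of the basic ranges for the plain forms, assuming the isLong/isNarrow flags in the real helpers adjust these bounds for the widening and narrowing instruction variants:

  #include <cstdint>

  // Basic NEON immediate-shift ranges, plain (non-widening, non-narrowing)
  // forms only: left shifts take 0 .. EltBits-1, right shifts take 1 .. EltBits.
  bool inVShiftLImmRange(int64_t Cnt, unsigned EltBits) {
    return Cnt >= 0 && Cnt < (int64_t)EltBits;
  }
  bool inVShiftRImmRange(int64_t Cnt, unsigned EltBits) {
    return Cnt >= 1 && Cnt <= (int64_t)EltBits;
  }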
8035 EVT VT = N->getOperand(1).getValueType();
8042 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
8046 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
8055 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
8061 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
8067 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
8072 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
8085 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
8101 if (Cnt == VT.getVectorElementType().getSizeInBits())
8140 EVT VT = N->getOperand(1).getValueType();
8144 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
8146 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
8173 EVT VT = N->getValueType(0);
8174 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
8183 return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1);
8189 if (!VT.isVector() || !TLI.isTypeLegal(VT))
8199 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
8200 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
8206 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
8209 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
8229 EVT VT = N->getValueType(0);
8233 if (VT == MVT::i32 &&
8249 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
8349 EVT VT = N->getValueType(0);
8378 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
8383 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
8465 EVT VT) const {
8466 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
8469 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
8473 switch (VT.getSimpleVT().SimpleTy) {
8519 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
8524 switch (VT.getSimpleVT().SimpleTy) {
8546 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
8554 switch (VT.getSimpleVT().SimpleTy) {
8579 static bool isLegalAddressImmediate(int64_t V, EVT VT,
8584 if (!VT.isSimple())
8588 return isLegalT1AddressImmediate(V, VT);
8590 return isLegalT2AddressImmediate(V, VT, Subtarget);
8595 switch (VT.getSimpleVT().SimpleTy) {
8617 EVT VT) const {
8622 switch (VT.getSimpleVT().SimpleTy) {
8653 EVT VT = getValueType(Ty, true);
8654 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
8673 if (!VT.isSimple())
8677 return isLegalT2ScaledAddressingMode(AM, VT);
8680 switch (VT.getSimpleVT().SimpleTy) {
8732 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
8739 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
8754 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
8791 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
8827 EVT VT;
8832 VT = LD->getMemoryVT();
8836 VT = ST->getMemoryVT();
8843 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8846 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
8866 EVT VT;
8870 VT = LD->getMemoryVT();
8874 VT = ST->getMemoryVT();
8882 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
8885 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
9027 EVT VT) const {
9043 if (VT == MVT::f32)
9045 if (VT.getSizeInBits() == 64)
9047 if (VT.getSizeInBits() == 128)
9051 if (VT == MVT::f32)
9053 if (VT.getSizeInBits() == 64)
9055 if (VT.getSizeInBits() == 128)
9059 if (VT == MVT::f32)
9067 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
9261 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
9264 if (VT == MVT::f32)
9266 if (VT == MVT::f64)
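The final matches (source lines 9261-9266) are from isFPImmLegal, which accepts an f32 or f64 immediate only when it fits the VFP/NEON 8-bit modified-immediate encoding. A brute-force standalone sketch of that test, assuming the usual description of the encodable set as +/-(n/16) * 2^r with n in 16..31 and r in -3..4; this summary of the encoding is an assumption, not taken from the file:

  #include <cmath>

  // Brute-force check of the assumed VFP/NEON encodable FP immediate set.
  // Zero, NaN, and infinity are not representable by this encoding.
  bool isEncodableFPImm(double Val) {
    if (Val == 0.0 || std::isnan(Val) || std::isinf(Val))
      return false;
    double A = std::fabs(Val);
    for (int r = -3; r <= 4; ++r)
      for (int n = 16; n <= 31; ++n)
        if (A == (n / 16.0) * std::ldexp(1.0, r))  // exact: both factors are dyadic
          return true;
    return false;
  }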