ARMISelLowering.cpp revision f26e03bc7e30162197641406e37e662a15d80f7e
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Instruction.h"
#include "llvm/Intrinsics.h"
#include "llvm/GlobalValue.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State);
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State);
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State);
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State);

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT, Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
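
  // Note: NEON shifts take either an immediate or a per-lane register amount,
  // so generic vector SHL/SRA/SRL are custom-lowered, ending up as the
  // ARMISD::VSHL / VSHRs / VSHRu nodes listed in getTargetNodeName below.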
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();
  return new TargetLoweringObjectFileELF(true);
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), ARMPCLabelIndex(0) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
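      // Note: each __*2vfp comparison helper returns a boolean (0/1) in r0
      // rather than the three-way result of the default __cmpsf2-style
      // routines; the setCmpLibcallCC calls record the condition used to test
      // that return value against zero. O_F32 and UO_F32 intentionally share
      // __unordsf2vfp and differ only in that condition.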
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
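  // (Setting the name to null marks the i128 shift helpers as unavailable;
  // 32-bit ARM runtimes do not provide the ti-mode routines, so these
  // operations are expanded instead.)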
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL,       MVT::i64, Expand);
    setOperationAction(ISD::MULHU,     MVT::i32, Expand);
    setOperationAction(ISD::MULHS,     MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,   MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->isThumb1Only() && !Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
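  // (The ARM ISAs targeted here have no integer divide instruction, so
  // division and remainder become runtime calls, e.g. the __aeabi_idiv
  // family under AAPCS or __divsi3 and friends with libgcc naming.)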
  setOperationAction(ISD::SDIV,    MVT::i32, Expand);
  setOperationAction(ISD::UDIV,    MVT::i32, Expand);
  setOperationAction(ISD::SREM,    MVT::i32, Expand);
  setOperationAction(ISD::UREM,    MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::RET,           MVT::Other, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Expand);
  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Expand);

  if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
    // Turn f64->i64 into FMRRD, i64 -> f64 to FMDRR iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow.
  setOperationAction(ISD::FSIN,  MVT::f64, Expand);
  setOperationAction(ISD::FSIN,  MVT::f32, Expand);
  setOperationAction(ISD::FCOS,  MVT::f32, Expand);
  setOperationAction(ISD::FCOS,  MVT::f64, Expand);
  setOperationAction(ISD::FREM,  MVT::f64, Expand);
  setOperationAction(ISD::FREM,  MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // int <-> fp are custom expanded into bit_convert + ARMISD ops.
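  // (Roughly: the i32 value is moved between the integer and VFP register
  // files with a bit_convert, and the conversion itself is done in-register
  // by the ARMISD::SITOF/UITOF/FTOSI/FTOUI nodes named in getTargetNodeName.)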
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::FMRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);

  setStackPointerRegisterToSaveRestore(ARM::SP);
  setSchedulingPreference(SchedulingForRegPressure);
  setIfCvtBlockSizeLimit(Subtarget->isThumb() ? 0 : 10);
  setIfCvtDupBlockSizeLimit(Subtarget->isThumb() ? 0 : 2);

  if (!Subtarget->isThumb()) {
    // Use branch latency information to determine if-conversion limits.
    // FIXME: If-converter should use instruction latency of the branch being
    // eliminated to compute the threshold. For ARMv6, the branch "latency"
    // varies depending on whether it's dynamically or statically predicted
    // and on whether the destination is in the prefetch buffer.
    const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
    const InstrItineraryData &InstrItins = Subtarget->getInstrItineraryData();
    unsigned Latency =
      InstrItins.getLatency(TII->get(ARM::Bcc).getSchedClass());
    if (Latency > 1) {
      setIfCvtBlockSizeLimit(Latency-1);
      if (Latency > 2)
        setIfCvtDupBlockSizeLimit(Latency-2);
    } else {
      setIfCvtBlockSizeLimit(10);
      setIfCvtDupBlockSizeLimit(2);
    }
  }

  maxStoresPerMemcpy = 1;   // temporary - rewrite interface to use type
  // Do not enable CodePlacementOpt for now: it currently runs after the
  // ARMConstantIslandPass and messes up branch relaxation and placement
  // of constant islands.
  // benefitFromCodePlacementOpt = true;
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::FMRRD:         return "ARMISD::FMRRD";
  case ARMISD::FMDRR:         return "ARMISD::FMDRR";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VDUPLANEQ:     return "ARMISD::VDUPLANEQ";
  }
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ?
    1 : 2;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. It
/// returns true if the operands should be inverted to form the proper
/// comparison.
static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  bool Invert = false;
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::GT; Invert = true; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
  return Invert;
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//
// The lowering operations for the calling convention proceed in this order:
//     LowerCALL              (virt regs --> phys regs, virt regs --> stack)
//     LowerFORMAL_ARGUMENTS  (phys --> virt regs, stack --> virt regs)
//     LowerRET               (virt regs --> phys regs)
//     LowerCALL              (phys regs --> virt regs)
//
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

// APCS f64 is in register pairs, possibly split to stack.
static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          CCState &State, bool CanFail) {
  static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else {
    // For the 2nd half of a v2f64, do not fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State) {
  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

// AAPCS f64 is in aligned register pairs.
static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                           CCValAssign::LocInfo &LocInfo,
                           CCState &State, bool CanFail) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0) {
    // For the 2nd half of a v2f64, do not just fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 8),
                                           LocVT, LocInfo));
    return true;
  }

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State) {
  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                         CCValAssign::LocInfo &LocInfo, CCState &State) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0)
    return false;  // we didn't handle it

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  return true;  // we handled it
}

static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(unsigned CC,
                                                 bool Return) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. It returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *ARMTargetLowering::
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
                unsigned CallingConv, SelectionDAG &DAG) {

  DebugLoc dl = TheCall->getDebugLoc();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool isVarArg = TheCall->isVarArg();
  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(TheCall,
                           CCAssignFnForNode(CallingConv, /* Return*/ true));

  SmallVector<SDValue, 8> ResultVals;

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
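      // With a soft-float return convention the f64 comes back as two i32
      // halves in consecutive locations; copy both out and rebuild the double
      // with ARMISD::FMDRR. A v2f64 result repeats this for its second half
      // and reassembles the vector with INSERT_VECTOR_ELT.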
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    ResultVals.push_back(Val);
  }

  // Merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
                     &ResultVals[0], ResultVals.size()).getNode();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*AlwaysInline=*/false, NULL, 0, NULL, 0);
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
                                    const SDValue &StackPtr,
                                    const CCValAssign &VA, SDValue Chain,
                                    SDValue Arg, ISD::ArgFlagsTy Flags) {
  DebugLoc dl = TheCall->getDebugLoc();
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
  }
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset);
}

void ARMTargetLowering::PassF64ArgInRegs(CallSDNode *TheCall, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) {
  DebugLoc dl = TheCall->getDebugLoc();

  SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, NextVA,
                                           Chain, fmrrd.getValue(1), Flags));
  }
}

/// LowerCALL - Lower an ISD::CALL node into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
  CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
  MVT RetVT = TheCall->getRetValType(0);
  SDValue Chain = TheCall->getChain();
  unsigned CC = TheCall->getCallingConv();
  bool isVarArg = TheCall->isVarArg();
  SDValue Callee = TheCall->getCallee();
  DebugLoc dl = TheCall->getDebugLoc();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC, /* Return*/ false));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = TheCall->getArg(realArgIdx);
    ISD::ArgFlagsTy Flags = TheCall->getArgFlags(realArgIdx);

    // Promote the value if needed.
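    // (Sub-word values travel extended to i32 per the calling convention;
    // BCvt bit-reinterprets FP values that are passed in integer registers.)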
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 are passed in i32 pairs and must be split into pieces.
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(TheCall, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(TheCall, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());
          if (StackPtr.getNode() == 0)
            StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

          MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
                                                 Chain, Op1, Flags));
        }
      } else {
        PassF64ArgInRegs(TheCall, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());
      if (StackPtr.getNode() == 0)
        StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

      MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
                                             Chain, Arg, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
    // tBX takes a register source operand.
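    // On Thumb1 without v5T there is no BLX, so a direct call to an ARM
    // function goes through a constant-pool stub whose address is loaded
    // into a register (with a PIC add when needed) and called indirectly.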
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                           ARMCP::CPStub, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr, NULL, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
                                                           ARMCP::CPStub, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr, NULL, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb1Only()) {
    if (!Subtarget->hasV5TOps() && (!isDirect || isARMFunc))
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    CallOpc = (isDirect || Subtarget->hasV5TOps())
      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
      : ARMISD::CALL_NOLINK;
  }
  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
    // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32), InFlag);
    InFlag = Chain.getValue(1);
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (RetVT != MVT::Other)
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
                 Op.getResNo());
}

SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
  // The chain is always operand #0.
  SDValue Chain = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;
  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values of ISD::RET.
  CCInfo.AnalyzeReturn(Op.getNode(), CCAssignFnForNode(CC, /* Return */ true));

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    // ISD::RET => ret chain, (regnum1,val1), ...
    // So i*2+1 indexes only the regnums.
    SDValue Arg = Op.getOperand(realRVLocIdx*2+1);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::FMRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(1), Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies stay stuck together by threading the
    // flag result through, so the scheduler cannot pull them apart.
    Flag = Chain.getValue(1);
  }

  SDValue result;
  if (Flag.getNode())
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else // Return Void
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);

  return result;
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) {
  DebugLoc dl = GA->getDebugLoc();
  MVT PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                             PCAdj, "tlsgd", true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, NULL, 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (const Type *) Type::Int32Ty;
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false, false, false,
                0, CallingConv::C, false,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) {
  GlobalValue *GV = GA->getGlobal();
  DebugLoc dl = GA->getDebugLoc();
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  MVT PtrVT = getPointerTy();
  // Get the Thread Pointer.
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (GV->isDeclaration()) {
    // initial exec model
    unsigned char PCAdj = Subtarget->isThumb() ?
      4 : 8;
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                               PCAdj, "gottpoff", true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
  } else {
    // local exec model
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMCP::CPValue, "tpoff");
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG);
  else
    return LowerToTLSExecModels(GA, DAG);
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) {
  MVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (RelocM == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMCP::CPValue,
                               UseGOTOFF ? "GOTOFF" : "GOT");
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                 CPAddr, NULL, 0);
    SDValue Chain = Result.getValue(1);
    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
    if (!UseGOTOFF)
      Result = DAG.getLoad(PtrVT, dl, Chain, Result, NULL, 0);
    return Result;
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  }
}

/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol
/// even in non-static mode.
static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) {
  // If symbol visibility is hidden, the extra load is not needed if
  // the symbol is definitely defined in the current translation unit.
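  // Otherwise a non-static access to a declaration or weak definition may
  // resolve outside this image and is reached through a non-lazy pointer
  // (the extra load emitted in LowerGlobalAddressDarwin).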
  bool isDecl = GV->isDeclaration() || GV->hasAvailableExternallyLinkage();
  if (GV->hasHiddenVisibility() && (!isDecl && !GV->hasCommonLinkage()))
    return false;
  return RelocM != Reloc::Static && (isDecl || GV->isWeakForLinker());
}

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) {
  MVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  bool IsIndirect = GVIsIndirectSymbol(GV, RelocM);
  SDValue CPAddr;
  if (RelocM == Reloc::Static)
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
  else {
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMCP::ARMCPKind Kind = IsIndirect ? ARMCP::CPNonLazyPtr
                                       : ARMCP::CPValue;
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                         Kind, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);

  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDValue Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
  }
  if (IsIndirect)
    Result = DAG.getLoad(PtrVT, dl, Chain, Result, NULL, 0);

  return Result;
}

SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
                                                    SelectionDAG &DAG) {
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  MVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_",
                                                       ARMPCLabelIndex,
                                                       ARMCP::CPValue, PCAdj);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc dl = Op.getDebugLoc();
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer:
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  case Intrinsic::eh_sjlj_setjmp:
    SDValue Res = DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32,
                              Op.getOperand(1));
    return Res;
  }
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  DebugLoc dl = Op.getDebugLoc();
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    unsigned ArgSize = NextVA.getLocVT().getSizeInBits()/8;
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(ArgSize, NextVA.getLocMemOffset());

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

SDValue
ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  SDValue Root = Op.getOperand(0);
  DebugLoc dl = Op.getDebugLoc();
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
  unsigned CC = MF.getFunction()->getCallingConv();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Op.getNode(),
                                CCAssignFnForNode(CC, /* Return*/ false));

  SmallVector<SDValue, 16> ArgValues;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();

      SDValue ArgValue;
      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
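        // Each half is fetched as an i32: GetF64FormalArgument glues two i32s
        // into an f64 with FMDRR, and for v2f64 two such f64s are inserted
        // into the vector.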
        RegVT = MVT::i32;

        if (VA.getLocVT() == MVT::v2f64) {
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Root, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Root, DAG, dl);
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Root, DAG, dl);

      } else {
        TargetRegisterClass *RC;
        if (FloatABIType == FloatABI::Hard && RegVT == MVT::f32)
          RC = ARM::SPRRegisterClass;
        else if (FloatABIType == FloatABI::Hard && RegVT == MVT::f64)
          RC = ARM::DPRRegisterClass;
        else if (AFI->isThumb1OnlyFunction())
          RC = ARM::tGPRRegisterClass;
        else
          RC = ARM::GPRRegisterClass;

        assert((RegVT == MVT::i32 || RegVT == MVT::f32 ||
                (FloatABIType == FloatABI::Hard && RegVT == MVT::f64)) &&
               "RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      ArgValues.push_back(ArgValue);

    } else { // !VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0));
    }
  }

  // varargs
  if (isVarArg) {
    static const unsigned GPRArgRegs[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3
    };

    unsigned NumGPRs = CCInfo.getFirstUnallocated(GPRArgRegs,
                          sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));

    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
    unsigned VARegSize = (4 - NumGPRs) * 4;
    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
    unsigned ArgOffset = 0;
    if (VARegSaveSize) {
      // If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by dereferencing
      // the result of va_next.
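      // Each unallocated GPR (from r0-r3) is copied into a virtual register
      // below and stored to consecutive 4-byte stack slots starting at
      // VarArgsFrameIndex.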
      AFI->setVarArgsRegSaveSize(VARegSaveSize);
      ArgOffset = CCInfo.getNextStackOffset();
      VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
                                                 VARegSaveSize - VARegSize);
      SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());

      SmallVector<SDValue, 4> MemOps;
      for (; NumGPRs < 4; ++NumGPRs) {
        TargetRegisterClass *RC;
        if (AFI->isThumb1OnlyFunction())
          RC = ARM::tGPRRegisterClass;
        else
          RC = ARM::GPRRegisterClass;

        unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
        SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                          DAG.getConstant(4, getPointerTy()));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           &MemOps[0], MemOps.size());
    } else
      // This will point to the next argument passed via stack.
      VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
                     &ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  }
  return false;
}

static bool isLegalCmpImmediate(unsigned C, bool isThumb1Only) {
  return ( isThumb1Only && (C & ~255U) == 0) ||
         (!isThumb1Only && ARM_AM::getSOImmVal(C) != -1);
}

/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
static SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                         SDValue &ARMCC, SelectionDAG &DAG, bool isThumb1Only,
                         DebugLoc dl) {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalCmpImmediate(C, isThumb1Only)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalCmpImmediate(C-1, isThumb1Only)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C > 0 && isLegalCmpImmediate(C-1, isThumb1Only)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalCmpImmediate(C+1, isThumb1Only)) {
          CC = (CC == ISD::SETLE) ?
               ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb1Only)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMCC = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                         DebugLoc dl) {
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
}

static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) {
  MVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                       ARMCC, CCR, Cmp);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    std::swap(TrueVal, FalseVal);

  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                               ARMCC, CCR, Cmp);
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
                         Result, TrueVal, ARMCC2, CCR, Cmp2);
  }
  return Result;
}

static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMCC, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    // Swap the LHS/RHS of the comparison if needed.
    std::swap(LHS, RHS);

  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  if (CondCode2 != ARMCC::AL) {
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  MVT PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI, UId);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    Addr = DAG.getLoad(MVT::i32, dl, Chain, Addr, NULL, 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  } else {
    Addr = DAG.getLoad(PTy, dl, Chain, Addr, NULL, 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  }
}

static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc =
    Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
}

static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc =
    Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;

  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
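  // The sign of the second operand is tested by comparing it against +0.0;
  // when it is negative (LT), CNEG replaces the result with the negation of
  // the fabs'd first operand.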
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  MVT VT = Op.getValueType();
  MVT SrcVT = Tmp1.getValueType();
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
    ? ARM::R7 : ARM::R11;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0);
  return FrameAddr;
}

SDValue
ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                           SDValue Chain,
                                           SDValue Dst, SDValue Src,
                                           SDValue Size, unsigned Align,
                                           bool AlwaysInline,
                                           const Value *DstSV,
                                           uint64_t DstSVOff,
                                           const Value *SrcSV,
                                           uint64_t SrcSVOff) {
  // Do repeated 4-byte loads and stores. To be improved.
  // This requires 4-byte alignment.
  if ((Align & 3) != 0)
    return SDValue();
  // This requires the copy size to be a constant, preferably
  // within a subtarget-specific limit.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (!ConstantSize)
    return SDValue();
  uint64_t SizeVal = ConstantSize->getZExtValue();
  if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
    return SDValue();

  unsigned BytesLeft = SizeVal & 3;
  unsigned NumMemOps = SizeVal >> 2;
  unsigned EmittedNumMemOps = 0;
  MVT VT = MVT::i32;
  unsigned VTSize = 4;
  unsigned i = 0;
  const unsigned MAX_LOADS_IN_LDM = 6;
  SDValue TFOps[MAX_LOADS_IN_LDM];
  SDValue Loads[MAX_LOADS_IN_LDM];
  uint64_t SrcOff = 0, DstOff = 0;

  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
  // same number of stores.  The loads and stores will get combined into
  // ldm / stm later on.
  while (EmittedNumMemOps < NumMemOps) {
    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      Loads[i] = DAG.getLoad(VT, dl, Chain,
                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                         DAG.getConstant(SrcOff, MVT::i32)),
                             SrcSV, SrcSVOff + SrcOff);
      TFOps[i] = Loads[i].getValue(1);
      SrcOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                              DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                          DAG.getConstant(DstOff, MVT::i32)),
                              DstSV, DstSVOff + DstOff);
      DstOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

    EmittedNumMemOps += i;
  }

  if (BytesLeft == 0)
    return Chain;

  // Issue loads / stores for the trailing 1 to 3 bytes.
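  // Same pattern as the main loop: emit all the trailing loads, a TokenFactor
  // barrier, then the matching stores, using i16 / i8 operations as needed.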
  unsigned BytesLeftSave = BytesLeft;
  i = 0;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    Loads[i] = DAG.getLoad(VT, dl, Chain,
                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
                                       DAG.getConstant(SrcOff, MVT::i32)),
                           SrcSV, SrcSVOff + SrcOff);
    TFOps[i] = Loads[i].getValue(1);
    ++i;
    SrcOff += VTSize;
    BytesLeft -= VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);

  i = 0;
  BytesLeft = BytesLeftSave;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
                                        DAG.getConstant(DstOff, MVT::i32)),
                            DstSV, DstSVOff + DstOff);
    ++i;
    DstOff += VTSize;
    BytesLeft -= VTSize;
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
}

static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  DebugLoc dl = N->getDebugLoc();
  if (N->getValueType(0) == MVT::f64) {
    // Turn i64->f64 into FMDRR.
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
  }

  // Turn f64->i64 into FMRRD.
  SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
                            DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Zero vectors are used to represent vector negation and in those cases
  // will be implemented with the NEON VNEG instruction.  However, VNEG does
  // not support i64 elements, so sometimes the zero vectors will need to be
  // explicitly constructed.  For those cases, and potentially other uses in
  // the future, always build zero vectors as <4 x i32> or <2 x i32> bitcasted
  // to their dest type.  This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
  if (VT.getSizeInBits() == 64)
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
  else
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
  // type.  This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  if (VT.getSizeInBits() == 64)
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
  else
    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  MVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Lower vector shifts on NEON to use VSHL.
  if (VT.isVector()) {
    assert(ST->hasNEON() && "unexpected vector shift");

    // Left shifts translate directly to the vshiftu intrinsic.
    if (N->getOpcode() == ISD::SHL)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
                         N->getOperand(0), N->getOperand(1));

    assert((N->getOpcode() == ISD::SRA ||
            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

    // NEON uses the same intrinsics for both left and right shifts.  For
    // right shifts, the shift amounts are negative, so negate the vector of
    // shift amounts.
    MVT ShiftVT = N->getOperand(1).getValueType();
    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                       getZeroVector(ShiftVT, DAG, dl),
                                       N->getOperand(1));
    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                               Intrinsic::arm_neon_vshifts :
                               Intrinsic::arm_neon_vshiftu);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(vshiftInt, MVT::i32),
                       N->getOperand(0), NegatedCount);
  }

  assert(VT == MVT::i64 &&
         (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here; all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG
                                            : ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison"); break;
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison"); break;
    case ISD::SETNE:  Invert = true;
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true;
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true;
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {

      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
/// VMOV instruction, and if so, return the constant being splatted.
static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
                           unsigned SplatBitSize, SelectionDAG &DAG) {
  switch (SplatBitSize) {
  case 8:
    // Any 1-byte value is OK.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    return DAG.getTargetConstant(SplatBits, MVT::i8);

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i16);
    break;

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0 ||
        (SplatBits & ~0xff0000) == 0 ||
        (SplatBits & ~0xff000000) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i32);

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff)
      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    break;

  case 64: {
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask)
        Val |= BitMask;
      else if ((SplatBits & BitMask) != 0)
        return SDValue();
      BitMask <<= 8;
    }
    return DAG.getTargetConstant(Val, MVT::i64);
  }

  default:
    llvm_unreachable("unexpected size for isVMOVSplat");
    break;
  }

  return SDValue();
}

/// getVMOVImm - If this is a build_vector of constants which can be
/// formed by using a VMOV instruction of the specified element size,
/// return the constant being splatted.  The ByteSize field indicates the
/// number of bytes of each element [1248].
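/// (That is, ByteSize must be 1, 2, 4, or 8.)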
SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ByteSize * 8))
    return SDValue();

  if (SplatBitSize > ByteSize * 8)
    return SDValue();

  return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                     SplatBitSize, DAG);
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
bool ARM::isVREVMask(ShuffleVectorSDNode *N, unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  MVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  unsigned BlockElts = N->getMaskElt(0) + 1;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned) N->getMaskElt(i) !=
        (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static SDValue BuildSplat(SDValue Val, MVT VT, SelectionDAG &DAG, DebugLoc dl) {
  // Canonicalize all-zeros and all-ones vectors.
  ConstantSDNode *ConstVal = dyn_cast<ConstantSDNode>(Val.getNode());
  if (ConstVal->isNullValue())
    return getZeroVector(VT, DAG, dl);
  if (ConstVal->isAllOnesValue())
    return getOnesVector(VT, DAG, dl);

  MVT CanonicalVT;
  if (VT.is64BitVector()) {
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v8i8;  break;
    case 16: CanonicalVT = MVT::v4i16; break;
    case 32: CanonicalVT = MVT::v2i32; break;
    case 64: CanonicalVT = MVT::v1i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  } else {
    assert(VT.is128BitVector() && "unknown splat vector size");
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v16i8; break;
    case 16: CanonicalVT = MVT::v8i16; break;
    case 32: CanonicalVT = MVT::v4i32; break;
    case 64: CanonicalVT = MVT::v2i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  }

  // Build a canonical splat for this value.
  SmallVector<SDValue, 8> Ops;
  Ops.assign(CanonicalVT.getVectorNumElements(), Val);
  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
                            Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
  DebugLoc dl = Op.getDebugLoc();
  MVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
                              SplatUndef.getZExtValue(), SplatBitSize, DAG);
    if (Val.getNode())
      return BuildSplat(Val, VT, DAG, dl);
  }

  // If there are only 2 elements in a 128-bit vector, insert them into an
  // undef vector.  This handles the common case for 128-bit vector argument
  // passing, where the insertions should be translated to subreg accesses
  // with no real instructions.
  if (VT.is128BitVector() && Op.getNumOperands() == 2) {
    SDValue Val = DAG.getUNDEF(VT);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
                        DAG.getIntPtrConstant(0));
    if (Op1.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
                        DAG.getIntPtrConstant(1));
    return Val;
  }

  return SDValue();
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  return Op;
}

static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  return Op;
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  assert((VT == MVT::i8 || VT == MVT::i16) &&
         "unexpected type for custom-lowering vector extract");
  SDValue Vec = Op.getOperand(0);
  SDValue Lane = Op.getOperand(1);
  Op = DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  Op = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Op, DAG.getValueType(VT));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

static SDValue LowerCONCAT_VECTORS(SDValue Op) {
  if (Op.getValueType().is128BitVector() && Op.getNumOperands() == 2)
    return Op;
  return SDValue();
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    return Subtarget->isTargetDarwin() ?
           LowerGlobalAddressDarwin(Op, DAG) :
           LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::CALL:          return LowerCALL(Op, DAG);
  case ISD::RET:           return LowerRET(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG, Subtarget);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG, Subtarget);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::RETURNADDR:    break;
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op);
  }
  return SDValue();
}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::BIT_CONVERT:
    Results.push_back(ExpandBIT_CONVERT(N, DAG));
    return;
  case ISD::SRL:
  case ISD::SRA: {
    SDValue Res = LowerShift(N, DAG, Subtarget);
    if (Res.getNode())
      Results.push_back(Res);
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case ARM::tMOVCCr: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;

    // thisMBB:
    // ...
    //  TrueVal = ...
    //  cmpTY ccX, r1, r2
    //  bCC sinkMBB
    //  fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    // copy0MBB:
    //  %FalseValue = ...
    //  # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    // sinkMBB:
    //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
  ISD::CondCode CC = ISD::SETCC_INVALID;

  if (isSlctCC) {
    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
  } else {
    SDValue CCOp = Slct.getOperand(0);
    if (CCOp.getOpcode() == ISD::SETCC)
      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
  }

  bool DoXform = false;
  bool InvCC = false;
  assert((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
         "Bad input!");

  if (LHS.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(LHS)->isNullValue()) {
    DoXform = true;
  } else if (CC != ISD::SETCC_INVALID &&
             RHS.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHS)->isNullValue()) {
    std::swap(LHS, RHS);
    SDValue Op0 = Slct.getOperand(0);
    MVT OpVT = isSlctCC ? Op0.getValueType() :
                          Op0.getOperand(0).getValueType();
    bool isInt = OpVT.isInteger();
    CC = ISD::getSetCCInverse(CC, isInt);

    if (!TLI.isCondCodeLegal(CC, OpVT))
      return SDValue();         // Inverse operator isn't legal.
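
    // The zero constant is now in the select's true-value slot and the
    // condition has been inverted, so the fold below works the same as in
    // the case above.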
    DoXform = true;
    InvCC = true;
  }

  if (DoXform) {
    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
    if (isSlctCC)
      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
                             Slct.getOperand(0), Slct.getOperand(1), CC);
    SDValue CCOp = Slct.getOperand(0);
    if (InvCC)
      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
                       CCOp, OtherOp, Result);
  }
  return SDValue();
}

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // added by evan in r37685 with no testcase.
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
    if (Result.getNode()) return Result;
  }
  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // added by evan in r37685 with no testcase.
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}

/// PerformFMRRDCombine - Target-specific dag combine xforms for ARMISD::FMRRD.
static SDValue PerformFMRRDCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  // fmrrd(fmdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::FMDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
  return SDValue();
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BIT_CONVERT)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, MVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative.  The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, MVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (isIntrinsic)
    Cnt = -Cnt;
  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vshiftls:
  case Intrinsic::arm_neon_vshiftlu:
  case Intrinsic::arm_neon_vshiftn:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    MVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for vshll intrinsic");

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vshiftn:
    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (Cnt == VT.getVectorElementType().getSizeInBits())
        VShiftOpc = ARMISD::VSHLLi;
      else
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
                     ARMISD::VSHLLs : ARMISD::VSHLLu);
      break;
    case Intrinsic::arm_neon_vshiftn:
      VShiftOpc = ARMISD::VSHRN; break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    MVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  MVT VT = N->getValueType(0);

  // Nothing to be done for scalar shifts.
  if (!VT.isVector())
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    }
  }
  return SDValue();
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8-
  // and 16-bit vector elements.  NEON supports these directly.  They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    MVT VT = N->getValueType(0);
    MVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType())) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
    }
  }

  return SDValue();
}

SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:      return PerformADDCombine(N, DCI);
  case ISD::SUB:      return PerformSUBCombine(N, DCI);
  case ARMISD::FMRRD: return PerformFMRRDCombine(N, DCI);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI.DAG, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return PerformExtendCombine(N, DCI.DAG, Subtarget);
  }
  return SDValue();
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:      return PerformADDCombine(N, DCI);
  case ISD::SUB:      return PerformSUBCombine(N, DCI);
  case ARMISD::FMRRD: return PerformFMRRDCombine(N, DCI);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI.DAG, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return PerformExtendCombine(N, DCI.DAG, Subtarget);
  }
  return SDValue();
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for a load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, MVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb()) { // FIXME for thumb2
    if (V < 0)
      return false;

    unsigned Scale = 1;
    switch (VT.getSimpleVT()) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
      // Scale == 1.
      break;
    case MVT::i16:
      Scale = 2;
      break;
    case MVT::i32:
      Scale = 4;
      break;
    }

    if ((V & (Scale - 1)) != 0)
      return false;
    V /= Scale;
    return V == (V & ((1LL << 5) - 1));
  }

  if (V < 0)
    V = -V;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2())
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}
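// For illustration (not part of the original source): on Thumb1 the offset
// is an unsigned 5-bit field scaled by the access size, so for an i32 access
// the legal offsets are 0, 4, ..., 124.  E.g. isLegalAddressImmediate(124,
// MVT::i32, ST) returns true, while 126 (misaligned) and 128 (out of range)
// are rejected.  In ARM mode the same i32 access accepts offsets of either
// sign up to 4095 (imm12).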
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  MVT VT = getValueType(Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold the address of a global into a load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb()) // FIXME for thumb2
      return false;
    // FALL THROUGH.
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    int Scale = AM.Scale;
    switch (VT.getSimpleVT()) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
    case MVT::i64:
      // This assumes i64 is legalized to a pair of i32.  If not (i.e., ldrd
      // / strd are used), then its address mode is the same as for i16.
      // r + r
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
      // r + r
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because ARM allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (AM.Scale & 1) return false;
      return isPowerOf2_32(AM.Scale);
    }
    break;
  }
  return true;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, MVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal =
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use FLDM / FSTM to emulate indexed FP load / store.
  return false;
}

static bool getT2IndexedAddressParts(SDNode *Ptr, MVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bits, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
      return true;
    }
  }

  return false;
}
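// For illustration (not part of the original source): the helpers above
// decide when an ADD/SUB feeding a memory access can fold into the
// addressing mode.  In assembly terms, pre-indexed is "ldr r0, [r1, #4]!"
// (the base register is updated before the access) and post-indexed is
// "ldr r0, [r1], #4" (updated after), so the separate pointer increment
// disappears.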
/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load / store address, returning the
/// base pointer, the offset, and the addressing mode by reference.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  MVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb() && Subtarget->hasThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - Returns true if this node can be combined
/// with a load / store to form a post-indexed load / store, returning the
/// base pointer, the offset, and the addressing mode by reference.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  MVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb() && Subtarget->hasThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::CMOV: {
    // Bits are known zero/one only if known on both the LHS and RHS.
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne  &= KnownOneRHS;
    return;
  }
  }
}
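// For illustration (not part of the original source): an ARMISD::CMOV
// produces one of its first two operands, so a bit is known here only when
// it is known to have the same value in both.  If, say, both operands are
// known to have their low two bits clear (e.g. aligned pointers), the CMOV
// result is also known to have its low two bits clear.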
//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
      if (Subtarget->isThumb1Only())
        return std::make_pair(0U, ARM::tGPRRegisterClass);
      else
        return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT == MVT::f64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      break;
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 0);
  case 'r':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0,  ARM::S1,  ARM::S2,  ARM::S3,
                                   ARM::S4,  ARM::S5,  ARM::S6,  ARM::S7,
                                   ARM::S8,  ARM::S9,  ARM::S10, ARM::S11,
                                   ARM::S12, ARM::S13, ARM::S14, ARM::S15,
                                   ARM::S16, ARM::S17, ARM::S18, ARM::S19,
                                   ARM::S20, ARM::S21, ARM::S22, ARM::S23,
                                   ARM::S24, ARM::S25, ARM::S26, ARM::S27,
                                   ARM::S28, ARM::S29, ARM::S30, ARM::S31, 0);
    if (VT == MVT::f64)
      return make_vector<unsigned>(ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
                                   ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
                                   ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
                                   ARM::D12, ARM::D13, ARM::D14, ARM::D15, 0);
    break;
  }

  return std::vector<unsigned>();
}
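// For illustration (not part of the original source): these classes back
// GCC-style inline asm register constraints, roughly as in
//
//   float r;
//   asm("vadd.f32 %0, %1, %2" : "=w"(r) : "w"(a), "w"(b));
//
// where 'w' requests a VFP register (an SPR for f32 here), 'l' a low
// register r0-r7, and 'r' any general-purpose register.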
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     bool hasMemory,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (Constraint) {
    case 'I':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between 0 and 255, for ADD
        // immediates.
        if (CVal >= 0 && CVal <= 255)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getT2SOImmVal(CVal) != -1)
          break;
      } else {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getSOImmVal(CVal) != -1)
          break;
      }
      return;

    case 'J':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between -255 and -1, for negated ADD
        // immediates.  This can be used in GCC with an "n" modifier that
        // prints the negated value, for use with SUB instructions.  It is
        // not useful otherwise but is implemented for compatibility.
        if (CVal >= -255 && CVal <= -1)
          break;
      } else {
        // This must be a constant between -4095 and 4095.  It is not clear
        // what this constraint is intended for.  Implemented for
        // compatibility with GCC.
        if (CVal >= -4095 && CVal <= 4095)
          break;
      }
      return;

    case 'K':
      if (Subtarget->isThumb1Only()) {
        // A 32-bit value where only one byte has a nonzero value.  Exclude
        // zero to match GCC.  This constraint is used by GCC internally for
        // constants that can be loaded with a move/shift combination.
        // It is not useful otherwise but is implemented for compatibility.
        if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction.  This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions.  It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getT2SOImmVal(~CVal) != -1)
          break;
      } else {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction.  This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions.  It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getSOImmVal(~CVal) != -1)
          break;
      }
      return;

    case 'L':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between -7 and 7,
        // for 3-operand ADD/SUB immediate instructions.
        if (CVal >= -7 && CVal <= 7)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction.  This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions.  It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getT2SOImmVal(-CVal) != -1)
          break;
      } else {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction.  This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions.  It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getSOImmVal(-CVal) != -1)
          break;
      }
      return;
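    // For illustration (not part of the original source): in ARM mode 'K'
    // matches a constant whose bitwise inverse is a valid rotated so_imm
    // immediate.  E.g. CVal = 0xFFFFFF00 gives ~CVal = 0xFF, a legal
    // immediate, so GCC's "B" modifier can print the inverted value for use
    // with BIC/MVN.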
    case 'M':
      if (Subtarget->isThumb()) { // FIXME thumb2
        // This must be a multiple of 4 between 0 and 1020, for
        // ADD sp + immediate.
        if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
          break;
      } else {
        // A power of two or a constant between 0 and 32.  This is used in
        // GCC for the shift amount on shifted register operands, but it is
        // useful in general for any shift amounts.
        if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
          break;
      }
      return;

    case 'N':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between 0 and 31, for shift amounts.
        if (CVal >= 0 && CVal <= 31)
          break;
      }
      return;

    case 'O':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a multiple of 4 between -508 and 508, for
        // ADD/SUB sp = sp + immediate.
        if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
          break;
      }
      return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint,
                                                      hasMemory, Ops, DAG);
}
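// For illustration (not part of the original source): a constant asm operand
// such as
//
//   asm("add %0, %1, %2" : "=r"(out) : "r"(in), "I"(255));
//
// reaches LowerAsmOperandForConstraint with Constraint == 'I'.  255 is a
// valid immediate on all subtargets, so it is added to Ops as a target
// constant; a value such as 257 fails the checks and nothing is added,
// making the asm operand invalid.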