ARMISelLowering.cpp revision 0c1aec18911f2a67fb37b6593d08f4f8cb7e18ef
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";
  case ARMISD::CNEG: return "ARMISD::CNEG";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
  case ARMISD::VSHRN: return "ARMISD::VSHRN";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX: return "ARMISD::FMAX";
  case ARMISD::FMIN: return "ARMISD::FMIN";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  }
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
//                      Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
1343 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1344 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1345 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1346 ARMPCLabelIndex, 1347 ARMCP::CPValue, 4); 1348 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1349 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1350 Callee = DAG.getLoad(getPointerTy(), dl, 1351 DAG.getEntryNode(), CPAddr, 1352 MachinePointerInfo::getConstantPool(), 1353 false, false, 0); 1354 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1355 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1356 getPointerTy(), Callee, PICLabel); 1357 } else { 1358 // On ELF targets for PIC code, direct calls should go through the PLT 1359 unsigned OpFlags = 0; 1360 if (Subtarget->isTargetELF() && 1361 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1362 OpFlags = ARMII::MO_PLT; 1363 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1364 } 1365 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1366 isDirect = true; 1367 bool isStub = Subtarget->isTargetDarwin() && 1368 getTargetMachine().getRelocationModel() != Reloc::Static; 1369 isARMFunc = !Subtarget->isThumb() || isStub; 1370 // tBX takes a register source operand. 1371 const char *Sym = S->getSymbol(); 1372 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1373 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1374 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1375 Sym, ARMPCLabelIndex, 4); 1376 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1377 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1378 Callee = DAG.getLoad(getPointerTy(), dl, 1379 DAG.getEntryNode(), CPAddr, 1380 MachinePointerInfo::getConstantPool(), 1381 false, false, 0); 1382 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1383 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1384 getPointerTy(), Callee, PICLabel); 1385 } else { 1386 unsigned OpFlags = 0; 1387 // On ELF targets for PIC code, direct calls should go through the PLT 1388 if (Subtarget->isTargetELF() && 1389 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1390 OpFlags = ARMII::MO_PLT; 1391 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1392 } 1393 } 1394 1395 // FIXME: handle tail calls differently. 1396 unsigned CallOpc; 1397 if (Subtarget->isThumb()) { 1398 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1399 CallOpc = ARMISD::CALL_NOLINK; 1400 else 1401 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1402 } else { 1403 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1404 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1405 : ARMISD::CALL_NOLINK; 1406 } 1407 1408 std::vector<SDValue> Ops; 1409 Ops.push_back(Chain); 1410 Ops.push_back(Callee); 1411 1412 // Add argument registers to the end of the list so that they are known live 1413 // into the call. 1414 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1415 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1416 RegsToPass[i].second.getValueType())); 1417 1418 if (InFlag.getNode()) 1419 Ops.push_back(InFlag); 1420 1421 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1422 if (isTailCall) 1423 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1424 1425 // Returns a chain and a flag for retval copy to use. 
1426 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1427 InFlag = Chain.getValue(1); 1428 1429 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1430 DAG.getIntPtrConstant(0, true), InFlag); 1431 if (!Ins.empty()) 1432 InFlag = Chain.getValue(1); 1433 1434 // Handle result values, copying them out of physregs into vregs that we 1435 // return. 1436 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1437 dl, DAG, InVals); 1438} 1439 1440/// MatchingStackOffset - Return true if the given stack call argument is 1441/// already available in the same position (relatively) of the caller's 1442/// incoming argument stack. 1443static 1444bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1445 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1446 const ARMInstrInfo *TII) { 1447 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1448 int FI = INT_MAX; 1449 if (Arg.getOpcode() == ISD::CopyFromReg) { 1450 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1451 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 1452 return false; 1453 MachineInstr *Def = MRI->getVRegDef(VR); 1454 if (!Def) 1455 return false; 1456 if (!Flags.isByVal()) { 1457 if (!TII->isLoadFromStackSlot(Def, FI)) 1458 return false; 1459 } else { 1460 return false; 1461 } 1462 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1463 if (Flags.isByVal()) 1464 // ByVal argument is passed in as a pointer but it's now being 1465 // dereferenced. e.g. 1466 // define @foo(%struct.X* %A) { 1467 // tail call @bar(%struct.X* byval %A) 1468 // } 1469 return false; 1470 SDValue Ptr = Ld->getBasePtr(); 1471 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1472 if (!FINode) 1473 return false; 1474 FI = FINode->getIndex(); 1475 } else 1476 return false; 1477 1478 assert(FI != INT_MAX); 1479 if (!MFI->isFixedObjectIndex(FI)) 1480 return false; 1481 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1482} 1483 1484/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1485/// for tail call optimization. Targets which want to do tail call 1486/// optimization should implement this function. 1487bool 1488ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1489 CallingConv::ID CalleeCC, 1490 bool isVarArg, 1491 bool isCalleeStructRet, 1492 bool isCallerStructRet, 1493 const SmallVectorImpl<ISD::OutputArg> &Outs, 1494 const SmallVectorImpl<SDValue> &OutVals, 1495 const SmallVectorImpl<ISD::InputArg> &Ins, 1496 SelectionDAG& DAG) const { 1497 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1498 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1499 bool CCMatch = CallerCC == CalleeCC; 1500 1501 // Look for obvious safe cases to perform tail call optimization that do not 1502 // require ABI changes. This is what gcc calls sibcall. 1503 1504 // Do not sibcall optimize vararg calls unless the call site is not passing 1505 // any arguments. 1506 if (isVarArg && !Outs.empty()) 1507 return false; 1508 1509 // Also avoid sibcall optimization if either caller or callee uses struct 1510 // return semantics. 1511 if (isCalleeStructRet || isCallerStructRet) 1512 return false; 1513 1514 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1515 // emitEpilogue is not ready for them. 1516 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1517 // LR. 
This means if we need to reload LR, it takes an extra instruction,
1518 // which outweighs the value of the tail call; but here we don't know yet
1519 // whether LR is going to be used. Probably the right approach is to
1520 // generate the tail call here and turn it back into CALL/RET in
1521 // emitEpilogue if LR is used.
1522
1523 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1524 // but we need to make sure there are enough registers; the only valid
1525 // registers are the 4 used for parameters. We don't currently do this
1526 // case.
1527 if (Subtarget->isThumb1Only())
1528 return false;
1529
1530 // If the calling conventions do not match, then we'd better make sure the
1531 // results are returned in the same way as what the caller expects.
1532 if (!CCMatch) {
1533 SmallVector<CCValAssign, 16> RVLocs1;
1534 CCState CCInfo1(CalleeCC, false, getTargetMachine(),
1535 RVLocs1, *DAG.getContext());
1536 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1537
1538 SmallVector<CCValAssign, 16> RVLocs2;
1539 CCState CCInfo2(CallerCC, false, getTargetMachine(),
1540 RVLocs2, *DAG.getContext());
1541 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1542
1543 if (RVLocs1.size() != RVLocs2.size())
1544 return false;
1545 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1546 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1547 return false;
1548 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1549 return false;
1550 if (RVLocs1[i].isRegLoc()) {
1551 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1552 return false;
1553 } else {
1554 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1555 return false;
1556 }
1557 }
1558 }
1559
1560 // If the callee takes no arguments then go on to check the results of the
1561 // call.
1562 if (!Outs.empty()) {
1563 // Check if stack adjustment is needed. For now, do not do this if any
1564 // argument is passed on the stack.
1565 SmallVector<CCValAssign, 16> ArgLocs;
1566 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
1567 ArgLocs, *DAG.getContext());
1568 CCInfo.AnalyzeCallOperands(Outs,
1569 CCAssignFnForNode(CalleeCC, false, isVarArg));
1570 if (CCInfo.getNextStackOffset()) {
1571 MachineFunction &MF = DAG.getMachineFunction();
1572
1573 // Check if the arguments are already laid out in the right way as
1574 // the caller's fixed stack objects.
1575 MachineFrameInfo *MFI = MF.getFrameInfo();
1576 const MachineRegisterInfo *MRI = &MF.getRegInfo();
1577 const ARMInstrInfo *TII =
1578 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
1579 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1580 i != e;
1581 ++i, ++realArgIdx) {
1582 CCValAssign &VA = ArgLocs[i];
1583 EVT RegVT = VA.getLocVT();
1584 SDValue Arg = OutVals[realArgIdx];
1585 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1586 if (VA.getLocInfo() == CCValAssign::Indirect)
1587 return false;
1588 if (VA.needsCustom()) {
1589 // f64 and vector types are split into multiple registers or
1590 // register/stack-slot combinations. The types will not match
1591 // the registers; give up on memory f64 refs until we figure
1592 // out what to do about this.
1593 if (!VA.isRegLoc()) 1594 return false; 1595 if (!ArgLocs[++i].isRegLoc()) 1596 return false; 1597 if (RegVT == MVT::v2f64) { 1598 if (!ArgLocs[++i].isRegLoc()) 1599 return false; 1600 if (!ArgLocs[++i].isRegLoc()) 1601 return false; 1602 } 1603 } else if (!VA.isRegLoc()) { 1604 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1605 MFI, MRI, TII)) 1606 return false; 1607 } 1608 } 1609 } 1610 } 1611 1612 return true; 1613} 1614 1615SDValue 1616ARMTargetLowering::LowerReturn(SDValue Chain, 1617 CallingConv::ID CallConv, bool isVarArg, 1618 const SmallVectorImpl<ISD::OutputArg> &Outs, 1619 const SmallVectorImpl<SDValue> &OutVals, 1620 DebugLoc dl, SelectionDAG &DAG) const { 1621 1622 // CCValAssign - represent the assignment of the return value to a location. 1623 SmallVector<CCValAssign, 16> RVLocs; 1624 1625 // CCState - Info about the registers and stack slots. 1626 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1627 *DAG.getContext()); 1628 1629 // Analyze outgoing return values. 1630 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1631 isVarArg)); 1632 1633 // If this is the first return lowered for this function, add 1634 // the regs to the liveout set for the function. 1635 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1636 for (unsigned i = 0; i != RVLocs.size(); ++i) 1637 if (RVLocs[i].isRegLoc()) 1638 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1639 } 1640 1641 SDValue Flag; 1642 1643 // Copy the result values into the output registers. 1644 for (unsigned i = 0, realRVLocIdx = 0; 1645 i != RVLocs.size(); 1646 ++i, ++realRVLocIdx) { 1647 CCValAssign &VA = RVLocs[i]; 1648 assert(VA.isRegLoc() && "Can only return in registers!"); 1649 1650 SDValue Arg = OutVals[realRVLocIdx]; 1651 1652 switch (VA.getLocInfo()) { 1653 default: llvm_unreachable("Unknown loc info!"); 1654 case CCValAssign::Full: break; 1655 case CCValAssign::BCvt: 1656 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1657 break; 1658 } 1659 1660 if (VA.needsCustom()) { 1661 if (VA.getLocVT() == MVT::v2f64) { 1662 // Extract the first half and return it in two registers. 1663 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1664 DAG.getConstant(0, MVT::i32)); 1665 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1666 DAG.getVTList(MVT::i32, MVT::i32), Half); 1667 1668 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1669 Flag = Chain.getValue(1); 1670 VA = RVLocs[++i]; // skip ahead to next loc 1671 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1672 HalfGPRs.getValue(1), Flag); 1673 Flag = Chain.getValue(1); 1674 VA = RVLocs[++i]; // skip ahead to next loc 1675 1676 // Extract the 2nd half and fall through to handle it as an f64 value. 1677 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1678 DAG.getConstant(1, MVT::i32)); 1679 } 1680 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1681 // available. 1682 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1683 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1684 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1685 Flag = Chain.getValue(1); 1686 VA = RVLocs[++i]; // skip ahead to next loc 1687 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1688 Flag); 1689 } else 1690 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1691 1692 // Guarantee that all emitted copies are 1693 // stuck together, avoiding something bad. 
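// ("Something bad" is presumably the scheduler or a later combine separating these
// copies: threading each CopyToReg through the previous copy's glue value keeps the
// whole sequence contiguous immediately before the RET_FLAG node built below.)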
1694 Flag = Chain.getValue(1); 1695 } 1696 1697 SDValue result; 1698 if (Flag.getNode()) 1699 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1700 else // Return Void 1701 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1702 1703 return result; 1704} 1705 1706bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1707 if (N->getNumValues() != 1) 1708 return false; 1709 if (!N->hasNUsesOfValue(1, 0)) 1710 return false; 1711 1712 unsigned NumCopies = 0; 1713 SDNode* Copies[2]; 1714 SDNode *Use = *N->use_begin(); 1715 if (Use->getOpcode() == ISD::CopyToReg) { 1716 Copies[NumCopies++] = Use; 1717 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1718 // f64 returned in a pair of GPRs. 1719 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1720 UI != UE; ++UI) { 1721 if (UI->getOpcode() != ISD::CopyToReg) 1722 return false; 1723 Copies[UI.getUse().getResNo()] = *UI; 1724 ++NumCopies; 1725 } 1726 } else if (Use->getOpcode() == ISD::BITCAST) { 1727 // f32 returned in a single GPR. 1728 if (!Use->hasNUsesOfValue(1, 0)) 1729 return false; 1730 Use = *Use->use_begin(); 1731 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1732 return false; 1733 Copies[NumCopies++] = Use; 1734 } else { 1735 return false; 1736 } 1737 1738 if (NumCopies != 1 && NumCopies != 2) 1739 return false; 1740 1741 bool HasRet = false; 1742 for (unsigned i = 0; i < NumCopies; ++i) { 1743 SDNode *Copy = Copies[i]; 1744 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1745 UI != UE; ++UI) { 1746 if (UI->getOpcode() == ISD::CopyToReg) { 1747 SDNode *Use = *UI; 1748 if (Use == Copies[0] || Use == Copies[1]) 1749 continue; 1750 return false; 1751 } 1752 if (UI->getOpcode() != ARMISD::RET_FLAG) 1753 return false; 1754 HasRet = true; 1755 } 1756 } 1757 1758 return HasRet; 1759} 1760 1761// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1762// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1763// one of the above mentioned nodes. It has to be wrapped because otherwise 1764// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1765// be used to form addressing mode. These wrapped nodes will be selected 1766// into MOVi. 
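// As a rough illustration of that scheme (a sketch, not additional functionality):
// LowerConstantPool below rewrites a ConstantPool node into
// (ARMISD::Wrapper (TargetConstantPool <index>)), and it is this wrapped target node
// that instruction selection consumes when it forms the constant-pool address.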
1767static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1768 EVT PtrVT = Op.getValueType(); 1769 // FIXME there is no actual debug info here 1770 DebugLoc dl = Op.getDebugLoc(); 1771 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1772 SDValue Res; 1773 if (CP->isMachineConstantPoolEntry()) 1774 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1775 CP->getAlignment()); 1776 else 1777 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1778 CP->getAlignment()); 1779 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1780} 1781 1782unsigned ARMTargetLowering::getJumpTableEncoding() const { 1783 return MachineJumpTableInfo::EK_Inline; 1784} 1785 1786SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1787 SelectionDAG &DAG) const { 1788 MachineFunction &MF = DAG.getMachineFunction(); 1789 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1790 unsigned ARMPCLabelIndex = 0; 1791 DebugLoc DL = Op.getDebugLoc(); 1792 EVT PtrVT = getPointerTy(); 1793 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1794 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1795 SDValue CPAddr; 1796 if (RelocM == Reloc::Static) { 1797 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1798 } else { 1799 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 1800 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1801 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1802 ARMCP::CPBlockAddress, 1803 PCAdj); 1804 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1805 } 1806 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1807 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1808 MachinePointerInfo::getConstantPool(), 1809 false, false, 0); 1810 if (RelocM == Reloc::Static) 1811 return Result; 1812 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1813 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1814} 1815 1816// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1817SDValue 1818ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1819 SelectionDAG &DAG) const { 1820 DebugLoc dl = GA->getDebugLoc(); 1821 EVT PtrVT = getPointerTy(); 1822 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1823 MachineFunction &MF = DAG.getMachineFunction(); 1824 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1825 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1826 ARMConstantPoolValue *CPV = 1827 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1828 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1829 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1830 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1831 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1832 MachinePointerInfo::getConstantPool(), 1833 false, false, 0); 1834 SDValue Chain = Argument.getValue(1); 1835 1836 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1837 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1838 1839 // call __tls_get_addr. 1840 ArgListTy Args; 1841 ArgListEntry Entry; 1842 Entry.Node = Argument; 1843 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1844 Args.push_back(Entry); 1845 // FIXME: is there useful debug info available here? 
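// (In outline, assuming the usual ARM/ELF general-dynamic convention: the PC-relative
// constant-pool load above yields the address of the GOT entry describing the variable,
// and the libcall below is effectively "addr = __tls_get_addr(entry)".)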
1846 std::pair<SDValue, SDValue> CallResult = 1847 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1848 false, false, false, false, 1849 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1850 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1851 return CallResult.first; 1852} 1853 1854// Lower ISD::GlobalTLSAddress using the "initial exec" or 1855// "local exec" model. 1856SDValue 1857ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1858 SelectionDAG &DAG) const { 1859 const GlobalValue *GV = GA->getGlobal(); 1860 DebugLoc dl = GA->getDebugLoc(); 1861 SDValue Offset; 1862 SDValue Chain = DAG.getEntryNode(); 1863 EVT PtrVT = getPointerTy(); 1864 // Get the Thread Pointer 1865 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1866 1867 if (GV->isDeclaration()) { 1868 MachineFunction &MF = DAG.getMachineFunction(); 1869 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1870 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1871 // Initial exec model. 1872 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1873 ARMConstantPoolValue *CPV = 1874 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1875 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1876 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1877 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1878 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1879 MachinePointerInfo::getConstantPool(), 1880 false, false, 0); 1881 Chain = Offset.getValue(1); 1882 1883 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1884 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1885 1886 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1887 MachinePointerInfo::getConstantPool(), 1888 false, false, 0); 1889 } else { 1890 // local exec model 1891 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 1892 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1893 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1894 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1895 MachinePointerInfo::getConstantPool(), 1896 false, false, 0); 1897 } 1898 1899 // The address of the thread local variable is the add of the thread 1900 // pointer with the offset of the variable. 1901 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1902} 1903 1904SDValue 1905ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1906 // TODO: implement the "local dynamic" model 1907 assert(Subtarget->isTargetELF() && 1908 "TLS not implemented for non-ELF targets"); 1909 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1910 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1911 // otherwise use the "Local Exec" TLS Model 1912 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1913 return LowerToTLSGeneralDynamicModel(GA, DAG); 1914 else 1915 return LowerToTLSExecModels(GA, DAG); 1916} 1917 1918SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1919 SelectionDAG &DAG) const { 1920 EVT PtrVT = getPointerTy(); 1921 DebugLoc dl = Op.getDebugLoc(); 1922 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1923 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1924 if (RelocM == Reloc::PIC_) { 1925 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1926 ARMConstantPoolValue *CPV = 1927 new ARMConstantPoolValue(GV, UseGOTOFF ? 
ARMCP::GOTOFF : ARMCP::GOT); 1928 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1929 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1930 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1931 CPAddr, 1932 MachinePointerInfo::getConstantPool(), 1933 false, false, 0); 1934 SDValue Chain = Result.getValue(1); 1935 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1936 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1937 if (!UseGOTOFF) 1938 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1939 MachinePointerInfo::getGOT(), false, false, 0); 1940 return Result; 1941 } else { 1942 // If we have T2 ops, we can materialize the address directly via movt/movw 1943 // pair. This is always cheaper. 1944 if (Subtarget->useMovt()) { 1945 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1946 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1947 } else { 1948 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1949 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1950 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1951 MachinePointerInfo::getConstantPool(), 1952 false, false, 0); 1953 } 1954 } 1955} 1956 1957SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1958 SelectionDAG &DAG) const { 1959 MachineFunction &MF = DAG.getMachineFunction(); 1960 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1961 unsigned ARMPCLabelIndex = 0; 1962 EVT PtrVT = getPointerTy(); 1963 DebugLoc dl = Op.getDebugLoc(); 1964 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1965 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1966 SDValue CPAddr; 1967 if (RelocM == Reloc::Static) 1968 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1969 else { 1970 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1971 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1972 ARMConstantPoolValue *CPV = 1973 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1974 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1975 } 1976 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1977 1978 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1979 MachinePointerInfo::getConstantPool(), 1980 false, false, 0); 1981 SDValue Chain = Result.getValue(1); 1982 1983 if (RelocM == Reloc::PIC_) { 1984 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1985 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1986 } 1987 1988 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1989 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 1990 false, false, 0); 1991 1992 return Result; 1993} 1994 1995SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 1996 SelectionDAG &DAG) const { 1997 assert(Subtarget->isTargetELF() && 1998 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 1999 MachineFunction &MF = DAG.getMachineFunction(); 2000 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2001 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2002 EVT PtrVT = getPointerTy(); 2003 DebugLoc dl = Op.getDebugLoc(); 2004 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2005 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2006 "_GLOBAL_OFFSET_TABLE_", 2007 ARMPCLabelIndex, PCAdj); 2008 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2009 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2010 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2011 MachinePointerInfo::getConstantPool(), 2012 false, false, 0); 2013 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2014 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2015} 2016 2017SDValue 2018ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2019 const { 2020 DebugLoc dl = Op.getDebugLoc(); 2021 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2022 Op.getOperand(0), Op.getOperand(1)); 2023} 2024 2025SDValue 2026ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2027 DebugLoc dl = Op.getDebugLoc(); 2028 SDValue Val = DAG.getConstant(0, MVT::i32); 2029 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2030 Op.getOperand(1), Val); 2031} 2032 2033SDValue 2034ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2035 DebugLoc dl = Op.getDebugLoc(); 2036 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2037 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2038} 2039 2040SDValue 2041ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2042 const ARMSubtarget *Subtarget) const { 2043 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2044 DebugLoc dl = Op.getDebugLoc(); 2045 switch (IntNo) { 2046 default: return SDValue(); // Don't custom lower most intrinsics. 2047 case Intrinsic::arm_thread_pointer: { 2048 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2049 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2050 } 2051 case Intrinsic::eh_sjlj_lsda: { 2052 MachineFunction &MF = DAG.getMachineFunction(); 2053 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2054 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2055 EVT PtrVT = getPointerTy(); 2056 DebugLoc dl = Op.getDebugLoc(); 2057 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2058 SDValue CPAddr; 2059 unsigned PCAdj = (RelocM != Reloc::PIC_) 2060 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2061 ARMConstantPoolValue *CPV = 2062 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2063 ARMCP::CPLSDA, PCAdj); 2064 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2065 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2066 SDValue Result = 2067 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2068 MachinePointerInfo::getConstantPool(), 2069 false, false, 0); 2070 2071 if (RelocM == Reloc::PIC_) { 2072 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2073 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2074 } 2075 return Result; 2076 } 2077 } 2078} 2079 2080static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2081 const ARMSubtarget *Subtarget) { 2082 DebugLoc dl = Op.getDebugLoc(); 2083 if (!Subtarget->hasDataBarrier()) { 2084 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2085 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2086 // here. 2087 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2088 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!");
2089 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2090 DAG.getConstant(0, MVT::i32));
2091 }
2092
2093 SDValue Op5 = Op.getOperand(5);
2094 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
2095 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2096 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2097 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
2098
2099 ARM_MB::MemBOpt DMBOpt;
2100 if (isDeviceBarrier)
2101 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
2102 else
2103 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
2104 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2105 DAG.getConstant(DMBOpt, MVT::i32));
2106 }
2107
2108 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2109 const ARMSubtarget *Subtarget) {
2110 // ARM pre v5TE and Thumb1 do not have preload instructions.
2111 if (!(Subtarget->isThumb2() ||
2112 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2113 // Just preserve the chain.
2114 return Op.getOperand(0);
2115
2116 DebugLoc dl = Op.getDebugLoc();
2117 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2118 if (!isRead &&
2119 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2120 // ARMv7 with MP extension has PLDW.
2121 return Op.getOperand(0);
2122
2123 if (Subtarget->isThumb())
2124 // Invert the bits.
2125 isRead = ~isRead & 1;
2126 unsigned isData = Subtarget->isThumb() ? 0 : 1;
2127
2128 // Currently there is no intrinsic that matches pli.
2129 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2130 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2131 DAG.getConstant(isData, MVT::i32));
2132 }
2133
2134 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2135 MachineFunction &MF = DAG.getMachineFunction();
2136 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2137
2138 // vastart just stores the address of the VarArgsFrameIndex slot into the
2139 // memory location argument.
2140 DebugLoc dl = Op.getDebugLoc();
2141 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2142 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2143 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2144 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2145 MachinePointerInfo(SV), false, false, 0);
2146 }
2147
2148 SDValue
2149 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2150 SDValue &Root, SelectionDAG &DAG,
2151 DebugLoc dl) const {
2152 MachineFunction &MF = DAG.getMachineFunction();
2153 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2154
2155 TargetRegisterClass *RC;
2156 if (AFI->isThumb1OnlyFunction())
2157 RC = ARM::tGPRRegisterClass;
2158 else
2159 RC = ARM::GPRRegisterClass;
2160
2161 // Transform the arguments stored in physical registers into virtual ones.
2162 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2163 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2164
2165 SDValue ArgValue2;
2166 if (NextVA.isMemLoc()) {
2167 MachineFrameInfo *MFI = MF.getFrameInfo();
2168 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2169
2170 // Create load node to retrieve arguments from the stack.
2171 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2172 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2173 MachinePointerInfo::getFixedStack(FI), 2174 false, false, 0); 2175 } else { 2176 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2177 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2178 } 2179 2180 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2181} 2182 2183SDValue 2184ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2185 CallingConv::ID CallConv, bool isVarArg, 2186 const SmallVectorImpl<ISD::InputArg> 2187 &Ins, 2188 DebugLoc dl, SelectionDAG &DAG, 2189 SmallVectorImpl<SDValue> &InVals) 2190 const { 2191 2192 MachineFunction &MF = DAG.getMachineFunction(); 2193 MachineFrameInfo *MFI = MF.getFrameInfo(); 2194 2195 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2196 2197 // Assign locations to all of the incoming arguments. 2198 SmallVector<CCValAssign, 16> ArgLocs; 2199 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2200 *DAG.getContext()); 2201 CCInfo.AnalyzeFormalArguments(Ins, 2202 CCAssignFnForNode(CallConv, /* Return*/ false, 2203 isVarArg)); 2204 2205 SmallVector<SDValue, 16> ArgValues; 2206 2207 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2208 CCValAssign &VA = ArgLocs[i]; 2209 2210 // Arguments stored in registers. 2211 if (VA.isRegLoc()) { 2212 EVT RegVT = VA.getLocVT(); 2213 2214 SDValue ArgValue; 2215 if (VA.needsCustom()) { 2216 // f64 and vector types are split up into multiple registers or 2217 // combinations of registers and stack slots. 2218 if (VA.getLocVT() == MVT::v2f64) { 2219 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2220 Chain, DAG, dl); 2221 VA = ArgLocs[++i]; // skip ahead to next loc 2222 SDValue ArgValue2; 2223 if (VA.isMemLoc()) { 2224 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2225 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2226 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2227 MachinePointerInfo::getFixedStack(FI), 2228 false, false, 0); 2229 } else { 2230 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2231 Chain, DAG, dl); 2232 } 2233 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2234 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2235 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2236 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2237 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2238 } else 2239 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2240 2241 } else { 2242 TargetRegisterClass *RC; 2243 2244 if (RegVT == MVT::f32) 2245 RC = ARM::SPRRegisterClass; 2246 else if (RegVT == MVT::f64) 2247 RC = ARM::DPRRegisterClass; 2248 else if (RegVT == MVT::v2f64) 2249 RC = ARM::QPRRegisterClass; 2250 else if (RegVT == MVT::i32) 2251 RC = (AFI->isThumb1OnlyFunction() ? 2252 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2253 else 2254 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2255 2256 // Transform the arguments in physical registers into virtual ones. 2257 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2258 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2259 } 2260 2261 // If this is an 8 or 16-bit value, it is really passed promoted 2262 // to 32 bits. Insert an assert[sz]ext to capture this, then 2263 // truncate to the right size. 
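// For example (a sketch): an i8 argument that the caller sign-extended arrives here as an
// i32 CopyFromReg; the SExt case below wraps it as (truncate (AssertSext copy, i8)) so that
// later passes know the high bits already hold the extension.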
2264 switch (VA.getLocInfo()) {
2265 default: llvm_unreachable("Unknown loc info!");
2266 case CCValAssign::Full: break;
2267 case CCValAssign::BCvt:
2268 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2269 break;
2270 case CCValAssign::SExt:
2271 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2272 DAG.getValueType(VA.getValVT()));
2273 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2274 break;
2275 case CCValAssign::ZExt:
2276 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2277 DAG.getValueType(VA.getValVT()));
2278 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2279 break;
2280 }
2281
2282 InVals.push_back(ArgValue);
2283
2284 } else { // VA.isMemLoc()
2285
2286 // sanity check
2287 assert(VA.isMemLoc());
2288 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
2289
2290 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
2291 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true);
2292
2293 // Create load nodes to retrieve arguments from the stack.
2294 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2295 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2296 MachinePointerInfo::getFixedStack(FI),
2297 false, false, 0));
2298 }
2299 }
2300
2301 // varargs
2302 if (isVarArg) {
2303 static const unsigned GPRArgRegs[] = {
2304 ARM::R0, ARM::R1, ARM::R2, ARM::R3
2305 };
2306
2307 unsigned NumGPRs = CCInfo.getFirstUnallocated
2308 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2309
2310 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
2311 unsigned VARegSize = (4 - NumGPRs) * 4;
2312 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2313 unsigned ArgOffset = CCInfo.getNextStackOffset();
2314 if (VARegSaveSize) {
2315 // If this function is vararg, store any remaining integer argument regs
2316 // to their spots on the stack so that they may be loaded by dereferencing
2317 // the result of va_next.
2318 AFI->setVarArgsRegSaveSize(VARegSaveSize);
2319 AFI->setVarArgsFrameIndex(
2320 MFI->CreateFixedObject(VARegSaveSize,
2321 ArgOffset + VARegSaveSize - VARegSize,
2322 false));
2323 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2324 getPointerTy());
2325
2326 SmallVector<SDValue, 4> MemOps;
2327 for (; NumGPRs < 4; ++NumGPRs) {
2328 TargetRegisterClass *RC;
2329 if (AFI->isThumb1OnlyFunction())
2330 RC = ARM::tGPRRegisterClass;
2331 else
2332 RC = ARM::GPRRegisterClass;
2333
2334 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
2335 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2336 SDValue Store =
2337 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2338 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2339 false, false, 0);
2340 MemOps.push_back(Store);
2341 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2342 DAG.getConstant(4, getPointerTy()));
2343 }
2344 if (!MemOps.empty())
2345 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2346 &MemOps[0], MemOps.size());
2347 } else
2348 // This will point to the next argument passed via stack.
2349 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2350 }
2351
2352 return Chain;
2353 }
2354
2355 /// isFloatingPointZero - Return true if this is +0.0.
2356 static bool isFloatingPointZero(SDValue Op) {
2357 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2358 return CFP->getValueAPF().isPosZero();
2359 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2360 // Maybe this has already been legalized into the constant pool?
2361 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2362 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2363 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2364 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2365 return CFP->getValueAPF().isPosZero();
2366 }
2367 }
2368 return false;
2369 }
2370
2371 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
2372 /// the given operands.
2373 SDValue
2374 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2375 SDValue &ARMcc, SelectionDAG &DAG,
2376 DebugLoc dl) const {
2377 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2378 unsigned C = RHSC->getZExtValue();
2379 if (!isLegalICmpImmediate(C)) {
2380 // Constant does not fit, try adjusting it by one?
2381 switch (CC) {
2382 default: break;
2383 case ISD::SETLT:
2384 case ISD::SETGE:
2385 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2386 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2387 RHS = DAG.getConstant(C-1, MVT::i32);
2388 }
2389 break;
2390 case ISD::SETULT:
2391 case ISD::SETUGE:
2392 if (C != 0 && isLegalICmpImmediate(C-1)) {
2393 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2394 RHS = DAG.getConstant(C-1, MVT::i32);
2395 }
2396 break;
2397 case ISD::SETLE:
2398 case ISD::SETGT:
2399 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2400 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2401 RHS = DAG.getConstant(C+1, MVT::i32);
2402 }
2403 break;
2404 case ISD::SETULE:
2405 case ISD::SETUGT:
2406 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2407 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2408 RHS = DAG.getConstant(C+1, MVT::i32);
2409 }
2410 break;
2411 }
2412 }
2413 }
2414
2415 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2416 ARMISD::NodeType CompareType;
2417 switch (CondCode) {
2418 default:
2419 CompareType = ARMISD::CMP;
2420 break;
2421 case ARMCC::EQ:
2422 case ARMCC::NE:
2423 // Uses only Z Flag
2424 CompareType = ARMISD::CMPZ;
2425 break;
2426 }
2427 ARMcc = DAG.getConstant(CondCode, MVT::i32);
2428 return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
2429 }
2430
2431 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2432SDValue 2433ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2434 DebugLoc dl) const { 2435 SDValue Cmp; 2436 if (!isFloatingPointZero(RHS)) 2437 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS); 2438 else 2439 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS); 2440 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp); 2441} 2442 2443SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2444 SDValue Cond = Op.getOperand(0); 2445 SDValue SelectTrue = Op.getOperand(1); 2446 SDValue SelectFalse = Op.getOperand(2); 2447 DebugLoc dl = Op.getDebugLoc(); 2448 2449 // Convert: 2450 // 2451 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2452 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2453 // 2454 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2455 const ConstantSDNode *CMOVTrue = 2456 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2457 const ConstantSDNode *CMOVFalse = 2458 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2459 2460 if (CMOVTrue && CMOVFalse) { 2461 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2462 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2463 2464 SDValue True; 2465 SDValue False; 2466 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2467 True = SelectTrue; 2468 False = SelectFalse; 2469 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2470 True = SelectFalse; 2471 False = SelectTrue; 2472 } 2473 2474 if (True.getNode() && False.getNode()) { 2475 EVT VT = Cond.getValueType(); 2476 SDValue ARMcc = Cond.getOperand(2); 2477 SDValue CCR = Cond.getOperand(3); 2478 SDValue Cmp = Cond.getOperand(4); 2479 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2480 } 2481 } 2482 } 2483 2484 return DAG.getSelectCC(dl, Cond, 2485 DAG.getConstant(0, Cond.getValueType()), 2486 SelectTrue, SelectFalse, ISD::SETNE); 2487} 2488 2489SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2490 EVT VT = Op.getValueType(); 2491 SDValue LHS = Op.getOperand(0); 2492 SDValue RHS = Op.getOperand(1); 2493 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2494 SDValue TrueVal = Op.getOperand(2); 2495 SDValue FalseVal = Op.getOperand(3); 2496 DebugLoc dl = Op.getDebugLoc(); 2497 2498 if (LHS.getValueType() == MVT::i32) { 2499 SDValue ARMcc; 2500 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2501 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2502 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2503 } 2504 2505 ARMCC::CondCodes CondCode, CondCode2; 2506 FPCCToARMCC(CC, CondCode, CondCode2); 2507 2508 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2509 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2510 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2511 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2512 ARMcc, CCR, Cmp); 2513 if (CondCode2 != ARMCC::AL) { 2514 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2515 // FIXME: Needs another CMP because flag can have but one use. 2516 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2517 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2518 Result, TrueVal, ARMcc2, CCR, Cmp2); 2519 } 2520 return Result; 2521} 2522 2523/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2524/// to morph to an integer compare sequence. 
2525 static bool canChangeToInt(SDValue Op, bool &SeenZero,
2526 const ARMSubtarget *Subtarget) {
2527 SDNode *N = Op.getNode();
2528 if (!N->hasOneUse())
2529 // Otherwise it requires moving the value from fp to integer registers.
2530 return false;
2531 if (!N->getNumValues())
2532 return false;
2533 EVT VT = Op.getValueType();
2534 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
2535 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
2536 // vmrs are very slow, e.g. cortex-a8.
2537 return false;
2538
2539 if (isFloatingPointZero(Op)) {
2540 SeenZero = true;
2541 return true;
2542 }
2543 return ISD::isNormalLoad(N);
2544 }
2545
2546 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
2547 if (isFloatingPointZero(Op))
2548 return DAG.getConstant(0, MVT::i32);
2549
2550 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
2551 return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2552 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
2553 Ld->isVolatile(), Ld->isNonTemporal(),
2554 Ld->getAlignment());
2555
2556 llvm_unreachable("Unknown VFP cmp argument!");
2557 }
2558
2559 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
2560 SDValue &RetVal1, SDValue &RetVal2) {
2561 if (isFloatingPointZero(Op)) {
2562 RetVal1 = DAG.getConstant(0, MVT::i32);
2563 RetVal2 = DAG.getConstant(0, MVT::i32);
2564 return;
2565 }
2566
2567 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
2568 SDValue Ptr = Ld->getBasePtr();
2569 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2570 Ld->getChain(), Ptr,
2571 Ld->getPointerInfo(),
2572 Ld->isVolatile(), Ld->isNonTemporal(),
2573 Ld->getAlignment());
2574
2575 EVT PtrType = Ptr.getValueType();
2576 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
2577 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
2578 PtrType, Ptr, DAG.getConstant(4, PtrType));
2579 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2580 Ld->getChain(), NewPtr,
2581 Ld->getPointerInfo().getWithOffset(4),
2582 Ld->isVolatile(), Ld->isNonTemporal(),
2583 NewAlign);
2584 return;
2585 }
2586
2587 llvm_unreachable("Unknown VFP cmp argument!");
2588 }
2589
2590 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
2591 /// f32 and even f64 comparisons to integer ones.
2592 SDValue
2593 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2594 SDValue Chain = Op.getOperand(0);
2595 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2596 SDValue LHS = Op.getOperand(2);
2597 SDValue RHS = Op.getOperand(3);
2598 SDValue Dest = Op.getOperand(4);
2599 DebugLoc dl = Op.getDebugLoc();
2600
2601 bool SeenZero = false;
2602 if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2603 canChangeToInt(RHS, SeenZero, Subtarget) &&
2604 // If one of the operands is zero, it's safe to ignore the NaN case since
2605 // we only care about equality comparisons.
2606 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2607 // If unsafe fp math optimization is enabled and there are no other uses of
2608 // the CMP operands, and the condition code is EQ or NE, we can optimize it
2609 // to an integer comparison.
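// (For instance: an f32 'a == b' where both operands are plain loads can be rewritten as an
// i32 compare of the loaded bit patterns; per the checks above this is only attempted for
// EQ/NE and only when a zero operand or known-non-NaN operands rule out the NaN cases.)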
2610 if (CC == ISD::SETOEQ) 2611 CC = ISD::SETEQ; 2612 else if (CC == ISD::SETUNE) 2613 CC = ISD::SETNE; 2614 2615 SDValue ARMcc; 2616 if (LHS.getValueType() == MVT::f32) { 2617 LHS = bitcastf32Toi32(LHS, DAG); 2618 RHS = bitcastf32Toi32(RHS, DAG); 2619 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2620 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2621 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2622 Chain, Dest, ARMcc, CCR, Cmp); 2623 } 2624 2625 SDValue LHS1, LHS2; 2626 SDValue RHS1, RHS2; 2627 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2628 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2629 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2630 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2631 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2632 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2633 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2634 } 2635 2636 return SDValue(); 2637} 2638 2639SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2640 SDValue Chain = Op.getOperand(0); 2641 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2642 SDValue LHS = Op.getOperand(2); 2643 SDValue RHS = Op.getOperand(3); 2644 SDValue Dest = Op.getOperand(4); 2645 DebugLoc dl = Op.getDebugLoc(); 2646 2647 if (LHS.getValueType() == MVT::i32) { 2648 SDValue ARMcc; 2649 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2650 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2651 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2652 Chain, Dest, ARMcc, CCR, Cmp); 2653 } 2654 2655 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2656 2657 if (UnsafeFPMath && 2658 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2659 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2660 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2661 if (Result.getNode()) 2662 return Result; 2663 } 2664 2665 ARMCC::CondCodes CondCode, CondCode2; 2666 FPCCToARMCC(CC, CondCode, CondCode2); 2667 2668 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2669 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2670 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2671 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2672 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2673 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2674 if (CondCode2 != ARMCC::AL) { 2675 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2676 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2677 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2678 } 2679 return Res; 2680} 2681 2682SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2683 SDValue Chain = Op.getOperand(0); 2684 SDValue Table = Op.getOperand(1); 2685 SDValue Index = Op.getOperand(2); 2686 DebugLoc dl = Op.getDebugLoc(); 2687 2688 EVT PTy = getPointerTy(); 2689 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2690 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2691 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2692 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2693 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2694 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2695 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2696 if (Subtarget->isThumb2()) { 2697 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2698 // which does another jump to the destination. 
This also makes it easier 2699 // to translate it to TBB / TBH later. 2700 // FIXME: This might not work if the function is extremely large. 2701 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2702 Addr, Op.getOperand(2), JTI, UId); 2703 } 2704 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2705 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2706 MachinePointerInfo::getJumpTable(), 2707 false, false, 0); 2708 Chain = Addr.getValue(1); 2709 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2710 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2711 } else { 2712 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2713 MachinePointerInfo::getJumpTable(), false, false, 0); 2714 Chain = Addr.getValue(1); 2715 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2716 } 2717} 2718 2719static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2720 DebugLoc dl = Op.getDebugLoc(); 2721 unsigned Opc; 2722 2723 switch (Op.getOpcode()) { 2724 default: 2725 assert(0 && "Invalid opcode!"); 2726 case ISD::FP_TO_SINT: 2727 Opc = ARMISD::FTOSI; 2728 break; 2729 case ISD::FP_TO_UINT: 2730 Opc = ARMISD::FTOUI; 2731 break; 2732 } 2733 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2734 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2735} 2736 2737static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2738 EVT VT = Op.getValueType(); 2739 DebugLoc dl = Op.getDebugLoc(); 2740 unsigned Opc; 2741 2742 switch (Op.getOpcode()) { 2743 default: 2744 assert(0 && "Invalid opcode!"); 2745 case ISD::SINT_TO_FP: 2746 Opc = ARMISD::SITOF; 2747 break; 2748 case ISD::UINT_TO_FP: 2749 Opc = ARMISD::UITOF; 2750 break; 2751 } 2752 2753 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 2754 return DAG.getNode(Opc, dl, VT, Op); 2755} 2756 2757SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2758 // Implement fcopysign with a fabs and a conditional fneg. 2759 SDValue Tmp0 = Op.getOperand(0); 2760 SDValue Tmp1 = Op.getOperand(1); 2761 DebugLoc dl = Op.getDebugLoc(); 2762 EVT VT = Op.getValueType(); 2763 EVT SrcVT = Tmp1.getValueType(); 2764 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2765 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2766 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2767 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2768 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2769 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2770} 2771 2772SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2773 MachineFunction &MF = DAG.getMachineFunction(); 2774 MachineFrameInfo *MFI = MF.getFrameInfo(); 2775 MFI->setReturnAddressIsTaken(true); 2776 2777 EVT VT = Op.getValueType(); 2778 DebugLoc dl = Op.getDebugLoc(); 2779 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2780 if (Depth) { 2781 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2782 SDValue Offset = DAG.getConstant(4, MVT::i32); 2783 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2784 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2785 MachinePointerInfo(), false, false, 0); 2786 } 2787 2788 // Return LR, which contains the return address. Mark it an implicit live-in. 
2789 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2790 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2791} 2792 2793SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2794 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2795 MFI->setFrameAddressIsTaken(true); 2796 2797 EVT VT = Op.getValueType(); 2798 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2799 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2800 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2801 ? ARM::R7 : ARM::R11; 2802 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2803 while (Depth--) 2804 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2805 MachinePointerInfo(), 2806 false, false, 0); 2807 return FrameAddr; 2808} 2809 2810/// ExpandBITCAST - If the target supports VFP, this function is called to 2811/// expand a bit convert where either the source or destination type is i64 to 2812/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2813/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2814/// vectors), since the legalizer won't know what to do with that. 2815static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 2816 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2817 DebugLoc dl = N->getDebugLoc(); 2818 SDValue Op = N->getOperand(0); 2819 2820 // This function is only supposed to be called for i64 types, either as the 2821 // source or destination of the bit convert. 2822 EVT SrcVT = Op.getValueType(); 2823 EVT DstVT = N->getValueType(0); 2824 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2825 "ExpandBITCAST called for non-i64 type"); 2826 2827 // Turn i64->f64 into VMOVDRR. 2828 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2829 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2830 DAG.getConstant(0, MVT::i32)); 2831 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2832 DAG.getConstant(1, MVT::i32)); 2833 return DAG.getNode(ISD::BITCAST, dl, DstVT, 2834 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2835 } 2836 2837 // Turn f64->i64 into VMOVRRD. 2838 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2839 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2840 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2841 // Merge the pieces into a single i64 value. 2842 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2843 } 2844 2845 return SDValue(); 2846} 2847 2848/// getZeroVector - Returns a vector of specified type with all zero elements. 2849/// Zero vectors are used to represent vector negation and in those cases 2850/// will be implemented with the NEON VNEG instruction. However, VNEG does 2851/// not support i64 elements, so sometimes the zero vectors will need to be 2852/// explicitly constructed. Regardless, use a canonical VMOV to create the 2853/// zero vector. 2854static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2855 assert(VT.isVector() && "Expected a vector type"); 2856 // The canonical modified immediate encoding of a zero vector is....0! 2857 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2858 EVT VmovVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 2859 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2860 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 2861} 2862 2863/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 2864/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2865SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2866 SelectionDAG &DAG) const { 2867 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2868 EVT VT = Op.getValueType(); 2869 unsigned VTBits = VT.getSizeInBits(); 2870 DebugLoc dl = Op.getDebugLoc(); 2871 SDValue ShOpLo = Op.getOperand(0); 2872 SDValue ShOpHi = Op.getOperand(1); 2873 SDValue ShAmt = Op.getOperand(2); 2874 SDValue ARMcc; 2875 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2876 2877 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2878 2879 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2880 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2881 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2882 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2883 DAG.getConstant(VTBits, MVT::i32)); 2884 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2885 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2886 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2887 2888 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2889 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2890 ARMcc, DAG, dl); 2891 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2892 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2893 CCR, Cmp); 2894 2895 SDValue Ops[2] = { Lo, Hi }; 2896 return DAG.getMergeValues(Ops, 2, dl); 2897} 2898 2899/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2900/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2901SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2902 SelectionDAG &DAG) const { 2903 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2904 EVT VT = Op.getValueType(); 2905 unsigned VTBits = VT.getSizeInBits(); 2906 DebugLoc dl = Op.getDebugLoc(); 2907 SDValue ShOpLo = Op.getOperand(0); 2908 SDValue ShOpHi = Op.getOperand(1); 2909 SDValue ShAmt = Op.getOperand(2); 2910 SDValue ARMcc; 2911 2912 assert(Op.getOpcode() == ISD::SHL_PARTS); 2913 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2914 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2915 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2916 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2917 DAG.getConstant(VTBits, MVT::i32)); 2918 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2919 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2920 2921 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2922 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2923 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2924 ARMcc, DAG, dl); 2925 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2926 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2927 CCR, Cmp); 2928 2929 SDValue Ops[2] = { Lo, Hi }; 2930 return DAG.getMergeValues(Ops, 2, dl); 2931} 2932 2933SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2934 SelectionDAG &DAG) const { 2935 // The rounding mode is in bits 23:22 of the FPSCR. 
2936 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
2937 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
2938 // so that the shift + and get folded into a bitfield extract.
2939 DebugLoc dl = Op.getDebugLoc();
2940 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
2941 DAG.getConstant(Intrinsic::arm_get_fpscr,
2942 MVT::i32));
2943 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
2944 DAG.getConstant(1U << 22, MVT::i32));
2945 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
2946 DAG.getConstant(22, MVT::i32));
2947 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
2948 DAG.getConstant(3, MVT::i32));
2949 }
2950
2951 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
2952 const ARMSubtarget *ST) {
2953 EVT VT = N->getValueType(0);
2954 DebugLoc dl = N->getDebugLoc();
2955
2956 if (!ST->hasV6T2Ops())
2957 return SDValue();
2958
2959 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
2960 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
2961 }
2962
2963 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
2964 const ARMSubtarget *ST) {
2965 EVT VT = N->getValueType(0);
2966 DebugLoc dl = N->getDebugLoc();
2967
2968 if (!VT.isVector())
2969 return SDValue();
2970
2971 // Lower vector shifts on NEON to use VSHL.
2972 assert(ST->hasNEON() && "unexpected vector shift");
2973
2974 // Left shifts translate directly to the vshiftu intrinsic.
2975 if (N->getOpcode() == ISD::SHL)
2976 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2977 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
2978 N->getOperand(0), N->getOperand(1));
2979
2980 assert((N->getOpcode() == ISD::SRA ||
2981 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
2982
2983 // NEON uses the same intrinsics for both left and right shifts. For
2984 // right shifts, the shift amounts are negative, so negate the vector of
2985 // shift amounts.
2986 EVT ShiftVT = N->getOperand(1).getValueType();
2987 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
2988 getZeroVector(ShiftVT, DAG, dl),
2989 N->getOperand(1));
2990 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
2991 Intrinsic::arm_neon_vshifts :
2992 Intrinsic::arm_neon_vshiftu);
2993 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2994 DAG.getConstant(vshiftInt, MVT::i32),
2995 N->getOperand(0), NegatedCount);
2996 }
2997
2998 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
2999 const ARMSubtarget *ST) {
3000 EVT VT = N->getValueType(0);
3001 DebugLoc dl = N->getDebugLoc();
3002
3003 // We can get here for a node like i32 = ISD::SHL i32, i64
3004 if (VT != MVT::i64)
3005 return SDValue();
3006
3007 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
3008 "Unknown shift to lower!");
3009
3010 // We only lower SRA, SRL of 1 here, all others use generic lowering.
3011 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
3012 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
3013 return SDValue();
3014
3015 // If we are in thumb mode, we don't have RRX.
3016 if (ST->isThumb1Only()) return SDValue();
3017
3018 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
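  // For illustration, a logical shift right by one of x = hi:lo becomes
  // roughly "lsrs hi, hi, #1" followed by "rrx lo, lo": the first shifts the
  // high word and leaves its old bit 0 in the carry flag, and RRX then
  // rotates that carry into bit 31 of the low word while shifting it right.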
3019 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3020 DAG.getConstant(0, MVT::i32)); 3021 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3022 DAG.getConstant(1, MVT::i32)); 3023 3024 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3025 // captures the result into a carry flag. 3026 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3027 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1); 3028 3029 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3030 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3031 3032 // Merge the pieces into a single i64 value. 3033 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3034} 3035 3036static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3037 SDValue TmpOp0, TmpOp1; 3038 bool Invert = false; 3039 bool Swap = false; 3040 unsigned Opc = 0; 3041 3042 SDValue Op0 = Op.getOperand(0); 3043 SDValue Op1 = Op.getOperand(1); 3044 SDValue CC = Op.getOperand(2); 3045 EVT VT = Op.getValueType(); 3046 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3047 DebugLoc dl = Op.getDebugLoc(); 3048 3049 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3050 switch (SetCCOpcode) { 3051 default: llvm_unreachable("Illegal FP comparison"); break; 3052 case ISD::SETUNE: 3053 case ISD::SETNE: Invert = true; // Fallthrough 3054 case ISD::SETOEQ: 3055 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3056 case ISD::SETOLT: 3057 case ISD::SETLT: Swap = true; // Fallthrough 3058 case ISD::SETOGT: 3059 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3060 case ISD::SETOLE: 3061 case ISD::SETLE: Swap = true; // Fallthrough 3062 case ISD::SETOGE: 3063 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3064 case ISD::SETUGE: Swap = true; // Fallthrough 3065 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3066 case ISD::SETUGT: Swap = true; // Fallthrough 3067 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3068 case ISD::SETUEQ: Invert = true; // Fallthrough 3069 case ISD::SETONE: 3070 // Expand this to (OLT | OGT). 3071 TmpOp0 = Op0; 3072 TmpOp1 = Op1; 3073 Opc = ISD::OR; 3074 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3075 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3076 break; 3077 case ISD::SETUO: Invert = true; // Fallthrough 3078 case ISD::SETO: 3079 // Expand this to (OLT | OGE). 3080 TmpOp0 = Op0; 3081 TmpOp1 = Op1; 3082 Opc = ISD::OR; 3083 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3084 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3085 break; 3086 } 3087 } else { 3088 // Integer comparisons. 3089 switch (SetCCOpcode) { 3090 default: llvm_unreachable("Illegal integer comparison"); break; 3091 case ISD::SETNE: Invert = true; 3092 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3093 case ISD::SETLT: Swap = true; 3094 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3095 case ISD::SETLE: Swap = true; 3096 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3097 case ISD::SETULT: Swap = true; 3098 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3099 case ISD::SETULE: Swap = true; 3100 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3101 } 3102 3103 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 
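    // For example, (vsetcc ne (and %a, %b), 0) becomes a plain VTST %a, %b,
    // while the seteq form becomes its complement, vmvn(vtst(%a, %b));
    // flipping Invert below accounts for the change from "equals zero" to
    // "has some bit in common".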
3104 if (Opc == ARMISD::VCEQ) { 3105 3106 SDValue AndOp; 3107 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3108 AndOp = Op0; 3109 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3110 AndOp = Op1; 3111 3112 // Ignore bitconvert. 3113 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3114 AndOp = AndOp.getOperand(0); 3115 3116 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3117 Opc = ARMISD::VTST; 3118 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3119 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3120 Invert = !Invert; 3121 } 3122 } 3123 } 3124 3125 if (Swap) 3126 std::swap(Op0, Op1); 3127 3128 // If one of the operands is a constant vector zero, attempt to fold the 3129 // comparison to a specialized compare-against-zero form. 3130 SDValue SingleOp; 3131 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3132 SingleOp = Op0; 3133 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3134 if (Opc == ARMISD::VCGE) 3135 Opc = ARMISD::VCLEZ; 3136 else if (Opc == ARMISD::VCGT) 3137 Opc = ARMISD::VCLTZ; 3138 SingleOp = Op1; 3139 } 3140 3141 SDValue Result; 3142 if (SingleOp.getNode()) { 3143 switch (Opc) { 3144 case ARMISD::VCEQ: 3145 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3146 case ARMISD::VCGE: 3147 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3148 case ARMISD::VCLEZ: 3149 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3150 case ARMISD::VCGT: 3151 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3152 case ARMISD::VCLTZ: 3153 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3154 default: 3155 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3156 } 3157 } else { 3158 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3159 } 3160 3161 if (Invert) 3162 Result = DAG.getNOT(dl, Result, VT); 3163 3164 return Result; 3165} 3166 3167/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3168/// valid vector constant for a NEON instruction with a "modified immediate" 3169/// operand (e.g., VMOV). If so, return the encoded value. 3170static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3171 unsigned SplatBitSize, SelectionDAG &DAG, 3172 EVT &VT, bool is128Bits, NEONModImmType type) { 3173 unsigned OpCmode, Imm; 3174 3175 // SplatBitSize is set to the smallest size that splats the vector, so a 3176 // zero vector will always have SplatBitSize == 8. However, NEON modified 3177 // immediate instructions others than VMOV do not support the 8-bit encoding 3178 // of a zero vector, and the default encoding of zero is supposed to be the 3179 // 32-bit version. 3180 if (SplatBits == 0) 3181 SplatBitSize = 32; 3182 3183 switch (SplatBitSize) { 3184 case 8: 3185 if (type != VMOVModImm) 3186 return SDValue(); 3187 // Any 1-byte value is OK. Op=0, Cmode=1110. 3188 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3189 OpCmode = 0xe; 3190 Imm = SplatBits; 3191 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3192 break; 3193 3194 case 16: 3195 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3196 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3197 if ((SplatBits & ~0xff) == 0) { 3198 // Value = 0x00nn: Op=x, Cmode=100x. 3199 OpCmode = 0x8; 3200 Imm = SplatBits; 3201 break; 3202 } 3203 if ((SplatBits & ~0xff00) == 0) { 3204 // Value = 0xnn00: Op=x, Cmode=101x. 
3205 OpCmode = 0xa; 3206 Imm = SplatBits >> 8; 3207 break; 3208 } 3209 return SDValue(); 3210 3211 case 32: 3212 // NEON's 32-bit VMOV supports splat values where: 3213 // * only one byte is nonzero, or 3214 // * the least significant byte is 0xff and the second byte is nonzero, or 3215 // * the least significant 2 bytes are 0xff and the third is nonzero. 3216 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3217 if ((SplatBits & ~0xff) == 0) { 3218 // Value = 0x000000nn: Op=x, Cmode=000x. 3219 OpCmode = 0; 3220 Imm = SplatBits; 3221 break; 3222 } 3223 if ((SplatBits & ~0xff00) == 0) { 3224 // Value = 0x0000nn00: Op=x, Cmode=001x. 3225 OpCmode = 0x2; 3226 Imm = SplatBits >> 8; 3227 break; 3228 } 3229 if ((SplatBits & ~0xff0000) == 0) { 3230 // Value = 0x00nn0000: Op=x, Cmode=010x. 3231 OpCmode = 0x4; 3232 Imm = SplatBits >> 16; 3233 break; 3234 } 3235 if ((SplatBits & ~0xff000000) == 0) { 3236 // Value = 0xnn000000: Op=x, Cmode=011x. 3237 OpCmode = 0x6; 3238 Imm = SplatBits >> 24; 3239 break; 3240 } 3241 3242 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3243 if (type == OtherModImm) return SDValue(); 3244 3245 if ((SplatBits & ~0xffff) == 0 && 3246 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3247 // Value = 0x0000nnff: Op=x, Cmode=1100. 3248 OpCmode = 0xc; 3249 Imm = SplatBits >> 8; 3250 SplatBits |= 0xff; 3251 break; 3252 } 3253 3254 if ((SplatBits & ~0xffffff) == 0 && 3255 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3256 // Value = 0x00nnffff: Op=x, Cmode=1101. 3257 OpCmode = 0xd; 3258 Imm = SplatBits >> 16; 3259 SplatBits |= 0xffff; 3260 break; 3261 } 3262 3263 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3264 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3265 // VMOV.I32. A (very) minor optimization would be to replicate the value 3266 // and fall through here to test for a valid 64-bit splat. But, then the 3267 // caller would also need to check and handle the change in size. 3268 return SDValue(); 3269 3270 case 64: { 3271 if (type != VMOVModImm) 3272 return SDValue(); 3273 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3274 uint64_t BitMask = 0xff; 3275 uint64_t Val = 0; 3276 unsigned ImmMask = 1; 3277 Imm = 0; 3278 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3279 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3280 Val |= BitMask; 3281 Imm |= ImmMask; 3282 } else if ((SplatBits & BitMask) != 0) { 3283 return SDValue(); 3284 } 3285 BitMask <<= 8; 3286 ImmMask <<= 1; 3287 } 3288 // Op=1, Cmode=1110. 3289 OpCmode = 0x1e; 3290 SplatBits = Val; 3291 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3292 break; 3293 } 3294 3295 default: 3296 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3297 return SDValue(); 3298 } 3299 3300 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3301 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3302} 3303 3304static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3305 bool &ReverseVEXT, unsigned &Imm) { 3306 unsigned NumElts = VT.getVectorNumElements(); 3307 ReverseVEXT = false; 3308 3309 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3310 if (M[0] < 0) 3311 return false; 3312 3313 Imm = M[0]; 3314 3315 // If this is a VEXT shuffle, the immediate value is the index of the first 3316 // element. The other shuffle indices must be the successive elements after 3317 // the first one. 3318 unsigned ExpectedElt = Imm; 3319 for (unsigned i = 1; i < NumElts; ++i) { 3320 // Increment the expected index. 
If it wraps around, it may still be 3321 // a VEXT but the source vectors must be swapped. 3322 ExpectedElt += 1; 3323 if (ExpectedElt == NumElts * 2) { 3324 ExpectedElt = 0; 3325 ReverseVEXT = true; 3326 } 3327 3328 if (M[i] < 0) continue; // ignore UNDEF indices 3329 if (ExpectedElt != static_cast<unsigned>(M[i])) 3330 return false; 3331 } 3332 3333 // Adjust the index value if the source operands will be swapped. 3334 if (ReverseVEXT) 3335 Imm -= NumElts; 3336 3337 return true; 3338} 3339 3340/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3341/// instruction with the specified blocksize. (The order of the elements 3342/// within each block of the vector is reversed.) 3343static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3344 unsigned BlockSize) { 3345 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3346 "Only possible block sizes for VREV are: 16, 32, 64"); 3347 3348 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3349 if (EltSz == 64) 3350 return false; 3351 3352 unsigned NumElts = VT.getVectorNumElements(); 3353 unsigned BlockElts = M[0] + 1; 3354 // If the first shuffle index is UNDEF, be optimistic. 3355 if (M[0] < 0) 3356 BlockElts = BlockSize / EltSz; 3357 3358 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3359 return false; 3360 3361 for (unsigned i = 0; i < NumElts; ++i) { 3362 if (M[i] < 0) continue; // ignore UNDEF indices 3363 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3364 return false; 3365 } 3366 3367 return true; 3368} 3369 3370static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3371 unsigned &WhichResult) { 3372 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3373 if (EltSz == 64) 3374 return false; 3375 3376 unsigned NumElts = VT.getVectorNumElements(); 3377 WhichResult = (M[0] == 0 ? 0 : 1); 3378 for (unsigned i = 0; i < NumElts; i += 2) { 3379 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3380 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3381 return false; 3382 } 3383 return true; 3384} 3385 3386/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3387/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3388/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3389static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3390 unsigned &WhichResult) { 3391 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3392 if (EltSz == 64) 3393 return false; 3394 3395 unsigned NumElts = VT.getVectorNumElements(); 3396 WhichResult = (M[0] == 0 ? 0 : 1); 3397 for (unsigned i = 0; i < NumElts; i += 2) { 3398 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3399 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3400 return false; 3401 } 3402 return true; 3403} 3404 3405static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3406 unsigned &WhichResult) { 3407 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3408 if (EltSz == 64) 3409 return false; 3410 3411 unsigned NumElts = VT.getVectorNumElements(); 3412 WhichResult = (M[0] == 0 ? 0 : 1); 3413 for (unsigned i = 0; i != NumElts; ++i) { 3414 if (M[i] < 0) continue; // ignore UNDEF indices 3415 if ((unsigned) M[i] != 2 * i + WhichResult) 3416 return false; 3417 } 3418 3419 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
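  // For example, a v8i8 mask of <0,2,4,6,8,10,12,14> selects the even
  // elements of the concatenated inputs (WhichResult == 0) and
  // <1,3,5,7,9,11,13,15> the odd ones (WhichResult == 1); the 2 x 32-bit
  // case is rejected below because VUZP.32 on a d-register pair degenerates
  // to VTRN.32.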
3420 if (VT.is64BitVector() && EltSz == 32) 3421 return false; 3422 3423 return true; 3424} 3425 3426/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3427/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3428/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3429static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3430 unsigned &WhichResult) { 3431 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3432 if (EltSz == 64) 3433 return false; 3434 3435 unsigned Half = VT.getVectorNumElements() / 2; 3436 WhichResult = (M[0] == 0 ? 0 : 1); 3437 for (unsigned j = 0; j != 2; ++j) { 3438 unsigned Idx = WhichResult; 3439 for (unsigned i = 0; i != Half; ++i) { 3440 int MIdx = M[i + j * Half]; 3441 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3442 return false; 3443 Idx += 2; 3444 } 3445 } 3446 3447 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3448 if (VT.is64BitVector() && EltSz == 32) 3449 return false; 3450 3451 return true; 3452} 3453 3454static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3455 unsigned &WhichResult) { 3456 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3457 if (EltSz == 64) 3458 return false; 3459 3460 unsigned NumElts = VT.getVectorNumElements(); 3461 WhichResult = (M[0] == 0 ? 0 : 1); 3462 unsigned Idx = WhichResult * NumElts / 2; 3463 for (unsigned i = 0; i != NumElts; i += 2) { 3464 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3465 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3466 return false; 3467 Idx += 1; 3468 } 3469 3470 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3471 if (VT.is64BitVector() && EltSz == 32) 3472 return false; 3473 3474 return true; 3475} 3476 3477/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3478/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3479/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3480static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3481 unsigned &WhichResult) { 3482 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3483 if (EltSz == 64) 3484 return false; 3485 3486 unsigned NumElts = VT.getVectorNumElements(); 3487 WhichResult = (M[0] == 0 ? 0 : 1); 3488 unsigned Idx = WhichResult * NumElts / 2; 3489 for (unsigned i = 0; i != NumElts; i += 2) { 3490 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3491 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3492 return false; 3493 Idx += 1; 3494 } 3495 3496 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3497 if (VT.is64BitVector() && EltSz == 32) 3498 return false; 3499 3500 return true; 3501} 3502 3503// If N is an integer constant that can be moved into a register in one 3504// instruction, return an SDValue of such a constant (will become a MOV 3505// instruction). Otherwise return null. 3506static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3507 const ARMSubtarget *ST, DebugLoc dl) { 3508 uint64_t Val; 3509 if (!isa<ConstantSDNode>(N)) 3510 return SDValue(); 3511 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3512 3513 if (ST->isThumb1Only()) { 3514 if (Val <= 255 || ~Val <= 255) 3515 return DAG.getConstant(Val, MVT::i32); 3516 } else { 3517 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3518 return DAG.getConstant(Val, MVT::i32); 3519 } 3520 return SDValue(); 3521} 3522 3523// If this is a case we can't handle, return null and let the default 3524// expansion code take care of it. 
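// The cases handled below: a constant splat encodable as a NEON
// modified-immediate VMOV or VMVN, a vector with only element 0 defined
// (lowered to SCALAR_TO_VECTOR), and a splat of one non-constant value via
// VDUP (f32 splats are retried as i32, and single-instruction constants are
// moved to a core register first). All-constant vectors that do not match
// these patterns fall back to the default constant-pool expansion, and
// anything else with 32- or 64-bit elements is assembled as an
// ARMISD::BUILD_VECTOR from subregister inserts.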
3525static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3526 const ARMSubtarget *ST) { 3527 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3528 DebugLoc dl = Op.getDebugLoc(); 3529 EVT VT = Op.getValueType(); 3530 3531 APInt SplatBits, SplatUndef; 3532 unsigned SplatBitSize; 3533 bool HasAnyUndefs; 3534 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3535 if (SplatBitSize <= 64) { 3536 // Check if an immediate VMOV works. 3537 EVT VmovVT; 3538 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3539 SplatUndef.getZExtValue(), SplatBitSize, 3540 DAG, VmovVT, VT.is128BitVector(), 3541 VMOVModImm); 3542 if (Val.getNode()) { 3543 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3544 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3545 } 3546 3547 // Try an immediate VMVN. 3548 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3549 ((1LL << SplatBitSize) - 1)); 3550 Val = isNEONModifiedImm(NegatedImm, 3551 SplatUndef.getZExtValue(), SplatBitSize, 3552 DAG, VmovVT, VT.is128BitVector(), 3553 VMVNModImm); 3554 if (Val.getNode()) { 3555 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3556 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3557 } 3558 } 3559 } 3560 3561 // Scan through the operands to see if only one value is used. 3562 unsigned NumElts = VT.getVectorNumElements(); 3563 bool isOnlyLowElement = true; 3564 bool usesOnlyOneValue = true; 3565 bool isConstant = true; 3566 SDValue Value; 3567 for (unsigned i = 0; i < NumElts; ++i) { 3568 SDValue V = Op.getOperand(i); 3569 if (V.getOpcode() == ISD::UNDEF) 3570 continue; 3571 if (i > 0) 3572 isOnlyLowElement = false; 3573 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3574 isConstant = false; 3575 3576 if (!Value.getNode()) 3577 Value = V; 3578 else if (V != Value) 3579 usesOnlyOneValue = false; 3580 } 3581 3582 if (!Value.getNode()) 3583 return DAG.getUNDEF(VT); 3584 3585 if (isOnlyLowElement) 3586 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3587 3588 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3589 3590 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3591 // i32 and try again. 3592 if (usesOnlyOneValue && EltSize <= 32) { 3593 if (!isConstant) 3594 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3595 if (VT.getVectorElementType().isFloatingPoint()) { 3596 SmallVector<SDValue, 8> Ops; 3597 for (unsigned i = 0; i < NumElts; ++i) 3598 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3599 Op.getOperand(i))); 3600 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3601 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3602 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3603 if (Val.getNode()) 3604 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3605 } 3606 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3607 if (Val.getNode()) 3608 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3609 } 3610 3611 // If all elements are constants and the case above didn't get hit, fall back 3612 // to the default expansion, which will generate a load from the constant 3613 // pool. 3614 if (isConstant) 3615 return SDValue(); 3616 3617 // Vectors with 32- or 64-bit elements can be built by directly assigning 3618 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3619 // will be legalized. 
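  // For example, a v2i32 build_vector of %a and %b becomes
  // (v2i32 bitcast (ARMISD::BUILD_VECTOR (f32 bitcast %a), (f32 bitcast %b))),
  // so each scalar is simply placed in an S subregister of the D result.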
3620 if (EltSize >= 32) { 3621 // Do the expansion with floating-point types, since that is what the VFP 3622 // registers are defined to use, and since i64 is not legal. 3623 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3624 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3625 SmallVector<SDValue, 8> Ops; 3626 for (unsigned i = 0; i < NumElts; ++i) 3627 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3628 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3629 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3630 } 3631 3632 return SDValue(); 3633} 3634 3635/// isShuffleMaskLegal - Targets can use this to indicate that they only 3636/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3637/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3638/// are assumed to be legal. 3639bool 3640ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3641 EVT VT) const { 3642 if (VT.getVectorNumElements() == 4 && 3643 (VT.is128BitVector() || VT.is64BitVector())) { 3644 unsigned PFIndexes[4]; 3645 for (unsigned i = 0; i != 4; ++i) { 3646 if (M[i] < 0) 3647 PFIndexes[i] = 8; 3648 else 3649 PFIndexes[i] = M[i]; 3650 } 3651 3652 // Compute the index in the perfect shuffle table. 3653 unsigned PFTableIndex = 3654 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3655 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3656 unsigned Cost = (PFEntry >> 30); 3657 3658 if (Cost <= 4) 3659 return true; 3660 } 3661 3662 bool ReverseVEXT; 3663 unsigned Imm, WhichResult; 3664 3665 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3666 return (EltSize >= 32 || 3667 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3668 isVREVMask(M, VT, 64) || 3669 isVREVMask(M, VT, 32) || 3670 isVREVMask(M, VT, 16) || 3671 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3672 isVTRNMask(M, VT, WhichResult) || 3673 isVUZPMask(M, VT, WhichResult) || 3674 isVZIPMask(M, VT, WhichResult) || 3675 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3676 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3677 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3678} 3679 3680/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3681/// the specified operations to build the shuffle. 
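/// As decoded below, each 32-bit table entry packs the cost in bits 31:30,
/// the operation in bits 29:26, and 13-bit identifiers for the left and
/// right sub-shuffles in bits 25:13 and 12:0; OP_COPY entries terminate the
/// recursion by returning one of the original inputs.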
3682static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3683 SDValue RHS, SelectionDAG &DAG, 3684 DebugLoc dl) { 3685 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3686 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3687 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3688 3689 enum { 3690 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3691 OP_VREV, 3692 OP_VDUP0, 3693 OP_VDUP1, 3694 OP_VDUP2, 3695 OP_VDUP3, 3696 OP_VEXT1, 3697 OP_VEXT2, 3698 OP_VEXT3, 3699 OP_VUZPL, // VUZP, left result 3700 OP_VUZPR, // VUZP, right result 3701 OP_VZIPL, // VZIP, left result 3702 OP_VZIPR, // VZIP, right result 3703 OP_VTRNL, // VTRN, left result 3704 OP_VTRNR // VTRN, right result 3705 }; 3706 3707 if (OpNum == OP_COPY) { 3708 if (LHSID == (1*9+2)*9+3) return LHS; 3709 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3710 return RHS; 3711 } 3712 3713 SDValue OpLHS, OpRHS; 3714 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3715 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3716 EVT VT = OpLHS.getValueType(); 3717 3718 switch (OpNum) { 3719 default: llvm_unreachable("Unknown shuffle opcode!"); 3720 case OP_VREV: 3721 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3722 case OP_VDUP0: 3723 case OP_VDUP1: 3724 case OP_VDUP2: 3725 case OP_VDUP3: 3726 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3727 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3728 case OP_VEXT1: 3729 case OP_VEXT2: 3730 case OP_VEXT3: 3731 return DAG.getNode(ARMISD::VEXT, dl, VT, 3732 OpLHS, OpRHS, 3733 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3734 case OP_VUZPL: 3735 case OP_VUZPR: 3736 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3737 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3738 case OP_VZIPL: 3739 case OP_VZIPR: 3740 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3741 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3742 case OP_VTRNL: 3743 case OP_VTRNR: 3744 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3745 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3746 } 3747} 3748 3749static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3750 SDValue V1 = Op.getOperand(0); 3751 SDValue V2 = Op.getOperand(1); 3752 DebugLoc dl = Op.getDebugLoc(); 3753 EVT VT = Op.getValueType(); 3754 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3755 SmallVector<int, 8> ShuffleMask; 3756 3757 // Convert shuffles that are directly supported on NEON to target-specific 3758 // DAG nodes, instead of keeping them as shuffles and matching them again 3759 // during code selection. This is more efficient and avoids the possibility 3760 // of inconsistencies between legalization and selection. 3761 // FIXME: floating-point vectors should be canonicalized to integer vectors 3762 // of the same time so that they get CSEd properly. 3763 SVN->getMask(ShuffleMask); 3764 3765 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3766 if (EltSize <= 32) { 3767 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 3768 int Lane = SVN->getSplatIndex(); 3769 // If this is undef splat, generate it via "just" vdup, if possible. 
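    // For example, a v4i32 mask of <2,2,2,2> becomes (VDUPLANE %V1, 2); an
    // all-undef mask defaults to lane 0, and when V1 is a SCALAR_TO_VECTOR
    // the lane-0 case collapses to a plain VDUP of the original scalar.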
3770 if (Lane == -1) Lane = 0; 3771 3772 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 3773 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 3774 } 3775 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 3776 DAG.getConstant(Lane, MVT::i32)); 3777 } 3778 3779 bool ReverseVEXT; 3780 unsigned Imm; 3781 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 3782 if (ReverseVEXT) 3783 std::swap(V1, V2); 3784 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 3785 DAG.getConstant(Imm, MVT::i32)); 3786 } 3787 3788 if (isVREVMask(ShuffleMask, VT, 64)) 3789 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 3790 if (isVREVMask(ShuffleMask, VT, 32)) 3791 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 3792 if (isVREVMask(ShuffleMask, VT, 16)) 3793 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 3794 3795 // Check for Neon shuffles that modify both input vectors in place. 3796 // If both results are used, i.e., if there are two shuffles with the same 3797 // source operands and with masks corresponding to both results of one of 3798 // these operations, DAG memoization will ensure that a single node is 3799 // used for both shuffles. 3800 unsigned WhichResult; 3801 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 3802 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3803 V1, V2).getValue(WhichResult); 3804 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 3805 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3806 V1, V2).getValue(WhichResult); 3807 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 3808 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3809 V1, V2).getValue(WhichResult); 3810 3811 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3812 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3813 V1, V1).getValue(WhichResult); 3814 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3815 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3816 V1, V1).getValue(WhichResult); 3817 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3818 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3819 V1, V1).getValue(WhichResult); 3820 } 3821 3822 // If the shuffle is not directly supported and it has 4 elements, use 3823 // the PerfectShuffle-generated table to synthesize it from other shuffles. 3824 unsigned NumElts = VT.getVectorNumElements(); 3825 if (NumElts == 4) { 3826 unsigned PFIndexes[4]; 3827 for (unsigned i = 0; i != 4; ++i) { 3828 if (ShuffleMask[i] < 0) 3829 PFIndexes[i] = 8; 3830 else 3831 PFIndexes[i] = ShuffleMask[i]; 3832 } 3833 3834 // Compute the index in the perfect shuffle table. 3835 unsigned PFTableIndex = 3836 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3837 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3838 unsigned Cost = (PFEntry >> 30); 3839 3840 if (Cost <= 4) 3841 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3842 } 3843 3844 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 3845 if (EltSize >= 32) { 3846 // Do the expansion with floating-point types, since that is what the VFP 3847 // registers are defined to use, and since i64 is not legal. 
3848 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3849 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3850 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 3851 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 3852 SmallVector<SDValue, 8> Ops; 3853 for (unsigned i = 0; i < NumElts; ++i) { 3854 if (ShuffleMask[i] < 0) 3855 Ops.push_back(DAG.getUNDEF(EltVT)); 3856 else 3857 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3858 ShuffleMask[i] < (int)NumElts ? V1 : V2, 3859 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 3860 MVT::i32))); 3861 } 3862 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3863 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3864 } 3865 3866 return SDValue(); 3867} 3868 3869static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 3870 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 3871 SDValue Lane = Op.getOperand(1); 3872 if (!isa<ConstantSDNode>(Lane)) 3873 return SDValue(); 3874 3875 SDValue Vec = Op.getOperand(0); 3876 if (Op.getValueType() == MVT::i32 && 3877 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 3878 DebugLoc dl = Op.getDebugLoc(); 3879 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 3880 } 3881 3882 return Op; 3883} 3884 3885static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 3886 // The only time a CONCAT_VECTORS operation can have legal types is when 3887 // two 64-bit vectors are concatenated to a 128-bit vector. 3888 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 3889 "unexpected CONCAT_VECTORS"); 3890 DebugLoc dl = Op.getDebugLoc(); 3891 SDValue Val = DAG.getUNDEF(MVT::v2f64); 3892 SDValue Op0 = Op.getOperand(0); 3893 SDValue Op1 = Op.getOperand(1); 3894 if (Op0.getOpcode() != ISD::UNDEF) 3895 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3896 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 3897 DAG.getIntPtrConstant(0)); 3898 if (Op1.getOpcode() != ISD::UNDEF) 3899 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3900 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 3901 DAG.getIntPtrConstant(1)); 3902 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 3903} 3904 3905/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 3906/// element has been zero/sign-extended, depending on the isSigned parameter, 3907/// from an integer type half its size. 3908static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 3909 bool isSigned) { 3910 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 3911 EVT VT = N->getValueType(0); 3912 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 3913 SDNode *BVN = N->getOperand(0).getNode(); 3914 if (BVN->getValueType(0) != MVT::v4i32 || 3915 BVN->getOpcode() != ISD::BUILD_VECTOR) 3916 return false; 3917 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 3918 unsigned HiElt = 1 - LoElt; 3919 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 3920 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 3921 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 3922 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 3923 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 3924 return false; 3925 if (isSigned) { 3926 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 3927 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 3928 return true; 3929 } else { 3930 if (Hi0->isNullValue() && Hi1->isNullValue()) 3931 return true; 3932 } 3933 return false; 3934 } 3935 3936 if (N->getOpcode() != ISD::BUILD_VECTOR) 3937 return false; 3938 3939 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 3940 SDNode *Elt = N->getOperand(i).getNode(); 3941 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 3942 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3943 unsigned HalfSize = EltSize / 2; 3944 if (isSigned) { 3945 int64_t SExtVal = C->getSExtValue(); 3946 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 3947 return false; 3948 } else { 3949 if ((C->getZExtValue() >> HalfSize) != 0) 3950 return false; 3951 } 3952 continue; 3953 } 3954 return false; 3955 } 3956 3957 return true; 3958} 3959 3960/// isSignExtended - Check if a node is a vector value that is sign-extended 3961/// or a constant BUILD_VECTOR with sign-extended elements. 3962static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 3963 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 3964 return true; 3965 if (isExtendedBUILD_VECTOR(N, DAG, true)) 3966 return true; 3967 return false; 3968} 3969 3970/// isZeroExtended - Check if a node is a vector value that is zero-extended 3971/// or a constant BUILD_VECTOR with zero-extended elements. 3972static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 3973 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 3974 return true; 3975 if (isExtendedBUILD_VECTOR(N, DAG, false)) 3976 return true; 3977 return false; 3978} 3979 3980/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 3981/// load, or BUILD_VECTOR with extended elements, return the unextended value. 3982static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 3983 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 3984 return N->getOperand(0); 3985 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 3986 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 3987 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 3988 LD->isNonTemporal(), LD->getAlignment()); 3989 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 3990 // have been legalized as a BITCAST from v4i32. 3991 if (N->getOpcode() == ISD::BITCAST) { 3992 SDNode *BVN = N->getOperand(0).getNode(); 3993 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 3994 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 3995 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 3996 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 3997 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 3998 } 3999 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
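  // For example, a v8i16 constant operand whose elements all fit in eight
  // bits is rebuilt here as a v8i8 BUILD_VECTOR of the truncated constants;
  // the VMULL emitted by LowerMUL then widens the product back to v8i16.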
4000 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4001 EVT VT = N->getValueType(0); 4002 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4003 unsigned NumElts = VT.getVectorNumElements(); 4004 MVT TruncVT = MVT::getIntegerVT(EltSize); 4005 SmallVector<SDValue, 8> Ops; 4006 for (unsigned i = 0; i != NumElts; ++i) { 4007 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4008 const APInt &CInt = C->getAPIntValue(); 4009 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4010 } 4011 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4012 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4013} 4014 4015static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4016 // Multiplications are only custom-lowered for 128-bit vectors so that 4017 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4018 EVT VT = Op.getValueType(); 4019 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4020 SDNode *N0 = Op.getOperand(0).getNode(); 4021 SDNode *N1 = Op.getOperand(1).getNode(); 4022 unsigned NewOpc = 0; 4023 if (isSignExtended(N0, DAG) && isSignExtended(N1, DAG)) 4024 NewOpc = ARMISD::VMULLs; 4025 else if (isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG)) 4026 NewOpc = ARMISD::VMULLu; 4027 else if (VT == MVT::v2i64) 4028 // Fall through to expand this. It is not legal. 4029 return SDValue(); 4030 else 4031 // Other vector multiplications are legal. 4032 return Op; 4033 4034 // Legalize to a VMULL instruction. 4035 DebugLoc DL = Op.getDebugLoc(); 4036 SDValue Op0 = SkipExtension(N0, DAG); 4037 SDValue Op1 = SkipExtension(N1, DAG); 4038 4039 assert(Op0.getValueType().is64BitVector() && 4040 Op1.getValueType().is64BitVector() && 4041 "unexpected types for extended operands to VMULL"); 4042 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4043} 4044 4045SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4046 switch (Op.getOpcode()) { 4047 default: llvm_unreachable("Don't know how to custom lower this!"); 4048 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4049 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4050 case ISD::GlobalAddress: 4051 return Subtarget->isTargetDarwin() ? 
LowerGlobalAddressDarwin(Op, DAG) : 4052 LowerGlobalAddressELF(Op, DAG); 4053 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4054 case ISD::SELECT: return LowerSELECT(Op, DAG); 4055 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4056 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4057 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4058 case ISD::VASTART: return LowerVASTART(Op, DAG); 4059 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4060 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4061 case ISD::SINT_TO_FP: 4062 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4063 case ISD::FP_TO_SINT: 4064 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4065 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4066 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4067 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4068 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4069 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4070 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4071 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4072 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4073 Subtarget); 4074 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4075 case ISD::SHL: 4076 case ISD::SRL: 4077 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4078 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4079 case ISD::SRL_PARTS: 4080 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4081 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4082 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4083 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4084 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4085 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4086 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4087 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4088 case ISD::MUL: return LowerMUL(Op, DAG); 4089 } 4090 return SDValue(); 4091} 4092 4093/// ReplaceNodeResults - Replace the results of node with an illegal result 4094/// type with new values built out of custom code. 
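/// For ARM this is currently used for i64 BITCAST (split into VMOVRRD /
/// VMOVDRR word pairs) and for 64-bit SRL/SRA (expanded through RRX), as
/// dispatched in the switch below.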
4095 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
4096 SmallVectorImpl<SDValue>&Results,
4097 SelectionDAG &DAG) const {
4098 SDValue Res;
4099 switch (N->getOpcode()) {
4100 default:
4101 llvm_unreachable("Don't know how to custom expand this!");
4102 break;
4103 case ISD::BITCAST:
4104 Res = ExpandBITCAST(N, DAG);
4105 break;
4106 case ISD::SRL:
4107 case ISD::SRA:
4108 Res = Expand64BitShift(N, DAG, Subtarget);
4109 break;
4110 }
4111 if (Res.getNode())
4112 Results.push_back(Res);
4113 }
4114
4115 //===----------------------------------------------------------------------===//
4116 // ARM Scheduler Hooks
4117 //===----------------------------------------------------------------------===//
4118
4119 MachineBasicBlock *
4120 ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
4121 MachineBasicBlock *BB,
4122 unsigned Size) const {
4123 unsigned dest = MI->getOperand(0).getReg();
4124 unsigned ptr = MI->getOperand(1).getReg();
4125 unsigned oldval = MI->getOperand(2).getReg();
4126 unsigned newval = MI->getOperand(3).getReg();
4127 unsigned scratch = BB->getParent()->getRegInfo()
4128 .createVirtualRegister(ARM::GPRRegisterClass);
4129 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4130 DebugLoc dl = MI->getDebugLoc();
4131 bool isThumb2 = Subtarget->isThumb2();
4132
4133 unsigned ldrOpc, strOpc;
4134 switch (Size) {
4135 default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
4136 case 1:
4137 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4138 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4139 break;
4140 case 2:
4141 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4142 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4143 break;
4144 case 4:
4145 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4146 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4147 break;
4148 }
4149
4150 MachineFunction *MF = BB->getParent();
4151 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4152 MachineFunction::iterator It = BB;
4153 ++It; // insert the new blocks after the current block
4154
4155 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4156 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4157 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4158 MF->insert(It, loop1MBB);
4159 MF->insert(It, loop2MBB);
4160 MF->insert(It, exitMBB);
4161
4162 // Transfer the remainder of BB and its successor edges to exitMBB.
4163 exitMBB->splice(exitMBB->begin(), BB,
4164 llvm::next(MachineBasicBlock::iterator(MI)),
4165 BB->end());
4166 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4167
4168 // thisMBB:
4169 // ...
4170 // fallthrough --> loop1MBB
4171 BB->addSuccessor(loop1MBB);
4172
4173 // loop1MBB:
4174 // ldrex dest, [ptr]
4175 // cmp dest, oldval
4176 // bne exitMBB
4177 BB = loop1MBB;
4178 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4179 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
4180 .addReg(dest).addReg(oldval));
4181 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4182 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4183 BB->addSuccessor(loop2MBB);
4184 BB->addSuccessor(exitMBB);
4185
4186 // loop2MBB:
4187 // strex scratch, newval, [ptr]
4188 // cmp scratch, #0
4189 // bne loop1MBB
4190 BB = loop2MBB;
4191 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
4192 .addReg(ptr));
4193 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2CMPri : ARM::CMPri)) 4194 .addReg(scratch).addImm(0)); 4195 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4196 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4197 BB->addSuccessor(loop1MBB); 4198 BB->addSuccessor(exitMBB); 4199 4200 // exitMBB: 4201 // ... 4202 BB = exitMBB; 4203 4204 MI->eraseFromParent(); // The instruction is gone now. 4205 4206 return BB; 4207} 4208 4209MachineBasicBlock * 4210ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4211 unsigned Size, unsigned BinOpcode) const { 4212 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4213 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4214 4215 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4216 MachineFunction *MF = BB->getParent(); 4217 MachineFunction::iterator It = BB; 4218 ++It; 4219 4220 unsigned dest = MI->getOperand(0).getReg(); 4221 unsigned ptr = MI->getOperand(1).getReg(); 4222 unsigned incr = MI->getOperand(2).getReg(); 4223 DebugLoc dl = MI->getDebugLoc(); 4224 4225 bool isThumb2 = Subtarget->isThumb2(); 4226 unsigned ldrOpc, strOpc; 4227 switch (Size) { 4228 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4229 case 1: 4230 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4231 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4232 break; 4233 case 2: 4234 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4235 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4236 break; 4237 case 4: 4238 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4239 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4240 break; 4241 } 4242 4243 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4244 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4245 MF->insert(It, loopMBB); 4246 MF->insert(It, exitMBB); 4247 4248 // Transfer the remainder of BB and its successor edges to exitMBB. 4249 exitMBB->splice(exitMBB->begin(), BB, 4250 llvm::next(MachineBasicBlock::iterator(MI)), 4251 BB->end()); 4252 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4253 4254 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4255 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4256 unsigned scratch2 = (!BinOpcode) ? incr : 4257 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4258 4259 // thisMBB: 4260 // ... 4261 // fallthrough --> loopMBB 4262 BB->addSuccessor(loopMBB); 4263 4264 // loopMBB: 4265 // ldrex dest, ptr 4266 // <binop> scratch2, dest, incr 4267 // strex scratch, scratch2, ptr 4268 // cmp scratch, #0 4269 // bne- loopMBB 4270 // fallthrough --> exitMBB 4271 BB = loopMBB; 4272 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4273 if (BinOpcode) { 4274 // operand order needs to go the other way for NAND 4275 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 4276 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4277 addReg(incr).addReg(dest)).addReg(0); 4278 else 4279 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4280 addReg(dest).addReg(incr)).addReg(0); 4281 } 4282 4283 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4284 .addReg(ptr)); 4285 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4286 .addReg(scratch).addImm(0)); 4287 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4288 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4289 4290 BB->addSuccessor(loopMBB); 4291 BB->addSuccessor(exitMBB); 4292 4293 // exitMBB: 4294 // ... 
4295 BB = exitMBB; 4296 4297 MI->eraseFromParent(); // The instruction is gone now. 4298 4299 return BB; 4300} 4301 4302static 4303MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4304 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4305 E = MBB->succ_end(); I != E; ++I) 4306 if (*I != Succ) 4307 return *I; 4308 llvm_unreachable("Expecting a BB with two successors!"); 4309} 4310 4311MachineBasicBlock * 4312ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4313 MachineBasicBlock *BB) const { 4314 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4315 DebugLoc dl = MI->getDebugLoc(); 4316 bool isThumb2 = Subtarget->isThumb2(); 4317 switch (MI->getOpcode()) { 4318 default: 4319 MI->dump(); 4320 llvm_unreachable("Unexpected instr type to insert"); 4321 4322 case ARM::ATOMIC_LOAD_ADD_I8: 4323 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4324 case ARM::ATOMIC_LOAD_ADD_I16: 4325 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4326 case ARM::ATOMIC_LOAD_ADD_I32: 4327 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4328 4329 case ARM::ATOMIC_LOAD_AND_I8: 4330 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4331 case ARM::ATOMIC_LOAD_AND_I16: 4332 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4333 case ARM::ATOMIC_LOAD_AND_I32: 4334 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4335 4336 case ARM::ATOMIC_LOAD_OR_I8: 4337 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4338 case ARM::ATOMIC_LOAD_OR_I16: 4339 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4340 case ARM::ATOMIC_LOAD_OR_I32: 4341 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4342 4343 case ARM::ATOMIC_LOAD_XOR_I8: 4344 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4345 case ARM::ATOMIC_LOAD_XOR_I16: 4346 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4347 case ARM::ATOMIC_LOAD_XOR_I32: 4348 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4349 4350 case ARM::ATOMIC_LOAD_NAND_I8: 4351 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4352 case ARM::ATOMIC_LOAD_NAND_I16: 4353 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4354 case ARM::ATOMIC_LOAD_NAND_I32: 4355 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4356 4357 case ARM::ATOMIC_LOAD_SUB_I8: 4358 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4359 case ARM::ATOMIC_LOAD_SUB_I16: 4360 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4361 case ARM::ATOMIC_LOAD_SUB_I32: 4362 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4363 4364 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4365 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4366 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4367 4368 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4369 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4370 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4371 4372 case ARM::tMOVCCr_pseudo: { 4373 // To "insert" a SELECT_CC instruction, we actually have to insert the 4374 // diamond control-flow pattern. 
The incoming instruction knows the 4375 // destination vreg to set, the condition code register to branch on, the 4376 // true/false values to select between, and a branch opcode to use. 4377 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4378 MachineFunction::iterator It = BB; 4379 ++It; 4380 4381 // thisMBB: 4382 // ... 4383 // TrueVal = ... 4384 // cmpTY ccX, r1, r2 4385 // bCC copy1MBB 4386 // fallthrough --> copy0MBB 4387 MachineBasicBlock *thisMBB = BB; 4388 MachineFunction *F = BB->getParent(); 4389 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4390 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4391 F->insert(It, copy0MBB); 4392 F->insert(It, sinkMBB); 4393 4394 // Transfer the remainder of BB and its successor edges to sinkMBB. 4395 sinkMBB->splice(sinkMBB->begin(), BB, 4396 llvm::next(MachineBasicBlock::iterator(MI)), 4397 BB->end()); 4398 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4399 4400 BB->addSuccessor(copy0MBB); 4401 BB->addSuccessor(sinkMBB); 4402 4403 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4404 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4405 4406 // copy0MBB: 4407 // %FalseValue = ... 4408 // # fallthrough to sinkMBB 4409 BB = copy0MBB; 4410 4411 // Update machine-CFG edges 4412 BB->addSuccessor(sinkMBB); 4413 4414 // sinkMBB: 4415 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4416 // ... 4417 BB = sinkMBB; 4418 BuildMI(*BB, BB->begin(), dl, 4419 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4420 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4421 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4422 4423 MI->eraseFromParent(); // The pseudo instruction is gone now. 4424 return BB; 4425 } 4426 4427 case ARM::BCCi64: 4428 case ARM::BCCZi64: { 4429 // Compare both parts that make up the double comparison separately for 4430 // equality. 4431 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4432 4433 unsigned LHS1 = MI->getOperand(1).getReg(); 4434 unsigned LHS2 = MI->getOperand(2).getReg(); 4435 if (RHSisZero) { 4436 AddDefaultPred(BuildMI(BB, dl, 4437 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4438 .addReg(LHS1).addImm(0)); 4439 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4440 .addReg(LHS2).addImm(0) 4441 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4442 } else { 4443 unsigned RHS1 = MI->getOperand(3).getReg(); 4444 unsigned RHS2 = MI->getOperand(4).getReg(); 4445 AddDefaultPred(BuildMI(BB, dl, 4446 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4447 .addReg(LHS1).addReg(RHS1)); 4448 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4449 .addReg(LHS2).addReg(RHS2) 4450 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4451 } 4452 4453 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 4454 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4455 if (MI->getOperand(0).getImm() == ARMCC::NE) 4456 std::swap(destMBB, exitMBB); 4457 4458 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4459 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4460 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4461 .addMBB(exitMBB); 4462 4463 MI->eraseFromParent(); // The pseudo instruction is gone now. 
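    // Recap of the expansion above: the second CMP is predicated on EQ, so
    // CPSR reads EQ here only when both 32-bit halves matched; the
    // EQ-conditional branch, together with the destMBB/exitMBB swap performed
    // for ARMCC::NE, routes equal and unequal values to the right successors.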
4464 return BB; 4465 } 4466 } 4467} 4468 4469//===----------------------------------------------------------------------===// 4470// ARM Optimization Hooks 4471//===----------------------------------------------------------------------===// 4472 4473static 4474SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 4475 TargetLowering::DAGCombinerInfo &DCI) { 4476 SelectionDAG &DAG = DCI.DAG; 4477 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4478 EVT VT = N->getValueType(0); 4479 unsigned Opc = N->getOpcode(); 4480 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 4481 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 4482 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 4483 ISD::CondCode CC = ISD::SETCC_INVALID; 4484 4485 if (isSlctCC) { 4486 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 4487 } else { 4488 SDValue CCOp = Slct.getOperand(0); 4489 if (CCOp.getOpcode() == ISD::SETCC) 4490 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 4491 } 4492 4493 bool DoXform = false; 4494 bool InvCC = false; 4495 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 4496 "Bad input!"); 4497 4498 if (LHS.getOpcode() == ISD::Constant && 4499 cast<ConstantSDNode>(LHS)->isNullValue()) { 4500 DoXform = true; 4501 } else if (CC != ISD::SETCC_INVALID && 4502 RHS.getOpcode() == ISD::Constant && 4503 cast<ConstantSDNode>(RHS)->isNullValue()) { 4504 std::swap(LHS, RHS); 4505 SDValue Op0 = Slct.getOperand(0); 4506 EVT OpVT = isSlctCC ? Op0.getValueType() : 4507 Op0.getOperand(0).getValueType(); 4508 bool isInt = OpVT.isInteger(); 4509 CC = ISD::getSetCCInverse(CC, isInt); 4510 4511 if (!TLI.isCondCodeLegal(CC, OpVT)) 4512 return SDValue(); // Inverse operator isn't legal. 4513 4514 DoXform = true; 4515 InvCC = true; 4516 } 4517 4518 if (DoXform) { 4519 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 4520 if (isSlctCC) 4521 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 4522 Slct.getOperand(0), Slct.getOperand(1), CC); 4523 SDValue CCOp = Slct.getOperand(0); 4524 if (InvCC) 4525 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 4526 CCOp.getOperand(0), CCOp.getOperand(1), CC); 4527 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4528 CCOp, OtherOp, Result); 4529 } 4530 return SDValue(); 4531} 4532 4533/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 4534/// operands N0 and N1. This is a helper for PerformADDCombine that is 4535/// called with the default operands, and if that fails, with commuted 4536/// operands. 4537static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 4538 TargetLowering::DAGCombinerInfo &DCI) { 4539 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 4540 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 4541 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 4542 if (Result.getNode()) return Result; 4543 } 4544 return SDValue(); 4545} 4546 4547/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 4548/// 4549static SDValue PerformADDCombine(SDNode *N, 4550 TargetLowering::DAGCombinerInfo &DCI) { 4551 SDValue N0 = N->getOperand(0); 4552 SDValue N1 = N->getOperand(1); 4553 4554 // First try with the default operand order. 4555 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 4556 if (Result.getNode()) 4557 return Result; 4558 4559 // If that didn't work, try again with the operands commuted. 
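  // E.g. (add x, (select cc, 0, c)), where the select is operand 1, is caught
  // on this second attempt and becomes (select cc, x, (add x, c)) just like
  // the non-commuted form handled above.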
4560 return PerformADDCombineWithOperands(N, N1, N0, DCI); 4561} 4562 4563/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 4564/// 4565static SDValue PerformSUBCombine(SDNode *N, 4566 TargetLowering::DAGCombinerInfo &DCI) { 4567 SDValue N0 = N->getOperand(0); 4568 SDValue N1 = N->getOperand(1); 4569 4570 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 4571 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 4572 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 4573 if (Result.getNode()) return Result; 4574 } 4575 4576 return SDValue(); 4577} 4578 4579static SDValue PerformMULCombine(SDNode *N, 4580 TargetLowering::DAGCombinerInfo &DCI, 4581 const ARMSubtarget *Subtarget) { 4582 SelectionDAG &DAG = DCI.DAG; 4583 4584 if (Subtarget->isThumb1Only()) 4585 return SDValue(); 4586 4587 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 4588 return SDValue(); 4589 4590 EVT VT = N->getValueType(0); 4591 if (VT != MVT::i32) 4592 return SDValue(); 4593 4594 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4595 if (!C) 4596 return SDValue(); 4597 4598 uint64_t MulAmt = C->getZExtValue(); 4599 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 4600 ShiftAmt = ShiftAmt & (32 - 1); 4601 SDValue V = N->getOperand(0); 4602 DebugLoc DL = N->getDebugLoc(); 4603 4604 SDValue Res; 4605 MulAmt >>= ShiftAmt; 4606 if (isPowerOf2_32(MulAmt - 1)) { 4607 // (mul x, 2^N + 1) => (add (shl x, N), x) 4608 Res = DAG.getNode(ISD::ADD, DL, VT, 4609 V, DAG.getNode(ISD::SHL, DL, VT, 4610 V, DAG.getConstant(Log2_32(MulAmt-1), 4611 MVT::i32))); 4612 } else if (isPowerOf2_32(MulAmt + 1)) { 4613 // (mul x, 2^N - 1) => (sub (shl x, N), x) 4614 Res = DAG.getNode(ISD::SUB, DL, VT, 4615 DAG.getNode(ISD::SHL, DL, VT, 4616 V, DAG.getConstant(Log2_32(MulAmt+1), 4617 MVT::i32)), 4618 V); 4619 } else 4620 return SDValue(); 4621 4622 if (ShiftAmt != 0) 4623 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 4624 DAG.getConstant(ShiftAmt, MVT::i32)); 4625 4626 // Do not add new nodes to DAG combiner worklist. 
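  // Worked example: for x * 20, MulAmt = 20 has two trailing zeros, so
  // ShiftAmt = 2 and the reduced MulAmt is 5 = 2^2 + 1; Res is built as
  // (add x, (shl x, 2)) and then shifted left by 2, giving ((x + (x << 2)) << 2).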
4627 DCI.CombineTo(N, Res, false); 4628 return SDValue(); 4629} 4630 4631static SDValue PerformANDCombine(SDNode *N, 4632 TargetLowering::DAGCombinerInfo &DCI) { 4633 // Attempt to use immediate-form VBIC 4634 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 4635 DebugLoc dl = N->getDebugLoc(); 4636 EVT VT = N->getValueType(0); 4637 SelectionDAG &DAG = DCI.DAG; 4638 4639 APInt SplatBits, SplatUndef; 4640 unsigned SplatBitSize; 4641 bool HasAnyUndefs; 4642 if (BVN && 4643 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4644 if (SplatBitSize <= 64) { 4645 EVT VbicVT; 4646 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 4647 SplatUndef.getZExtValue(), SplatBitSize, 4648 DAG, VbicVT, VT.is128BitVector(), 4649 OtherModImm); 4650 if (Val.getNode()) { 4651 SDValue Input = 4652 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 4653 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 4654 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 4655 } 4656 } 4657 } 4658 4659 return SDValue(); 4660} 4661 4662/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 4663static SDValue PerformORCombine(SDNode *N, 4664 TargetLowering::DAGCombinerInfo &DCI, 4665 const ARMSubtarget *Subtarget) { 4666 // Attempt to use immediate-form VORR 4667 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 4668 DebugLoc dl = N->getDebugLoc(); 4669 EVT VT = N->getValueType(0); 4670 SelectionDAG &DAG = DCI.DAG; 4671 4672 APInt SplatBits, SplatUndef; 4673 unsigned SplatBitSize; 4674 bool HasAnyUndefs; 4675 if (BVN && Subtarget->hasNEON() && 4676 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4677 if (SplatBitSize <= 64) { 4678 EVT VorrVT; 4679 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4680 SplatUndef.getZExtValue(), SplatBitSize, 4681 DAG, VorrVT, VT.is128BitVector(), 4682 OtherModImm); 4683 if (Val.getNode()) { 4684 SDValue Input = 4685 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 4686 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 4687 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 4688 } 4689 } 4690 } 4691 4692 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 4693 // reasonable. 4694 4695 // BFI is only available on V6T2+ 4696 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 4697 return SDValue(); 4698 4699 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 4700 DebugLoc DL = N->getDebugLoc(); 4701 // 1) or (and A, mask), val => ARMbfi A, val, mask 4702 // iff (val & mask) == val 4703 // 4704 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4705 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 4706 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4707 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 4708 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4709 // (i.e., copy a bitfield value into another bitfield of the same width) 4710 if (N0.getOpcode() != ISD::AND) 4711 return SDValue(); 4712 4713 if (VT != MVT::i32) 4714 return SDValue(); 4715 4716 SDValue N00 = N0.getOperand(0); 4717 4718 // The value and the mask need to be constants so we can verify this is 4719 // actually a bitfield set. If the mask is 0xffff, we can do better 4720 // via a movt instruction, so don't use BFI in that case. 
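  // For illustration, case (1) below turns
  //   (or (and A, 0xffff00ff), 0x00005500)
  // into ARMbfi A, 0x55, 0xffff00ff: the inserted value is shifted down by
  // the field's lsb (8 here) before being used as the BFI source.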
4721 SDValue MaskOp = N0.getOperand(1); 4722 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 4723 if (!MaskC) 4724 return SDValue(); 4725 unsigned Mask = MaskC->getZExtValue(); 4726 if (Mask == 0xffff) 4727 return SDValue(); 4728 SDValue Res; 4729 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 4730 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4731 if (N1C) { 4732 unsigned Val = N1C->getZExtValue(); 4733 if ((Val & ~Mask) != Val) 4734 return SDValue(); 4735 4736 if (ARM::isBitFieldInvertedMask(Mask)) { 4737 Val >>= CountTrailingZeros_32(~Mask); 4738 4739 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 4740 DAG.getConstant(Val, MVT::i32), 4741 DAG.getConstant(Mask, MVT::i32)); 4742 4743 // Do not add new nodes to DAG combiner worklist. 4744 DCI.CombineTo(N, Res, false); 4745 return SDValue(); 4746 } 4747 } else if (N1.getOpcode() == ISD::AND) { 4748 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4749 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4750 if (!N11C) 4751 return SDValue(); 4752 unsigned Mask2 = N11C->getZExtValue(); 4753 4754 if (ARM::isBitFieldInvertedMask(Mask) && 4755 ARM::isBitFieldInvertedMask(~Mask2) && 4756 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) { 4757 // The pack halfword instruction works better for masks that fit it, 4758 // so use that when it's available. 4759 if (Subtarget->hasT2ExtractPack() && 4760 (Mask == 0xffff || Mask == 0xffff0000)) 4761 return SDValue(); 4762 // 2a 4763 unsigned lsb = CountTrailingZeros_32(Mask2); 4764 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 4765 DAG.getConstant(lsb, MVT::i32)); 4766 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 4767 DAG.getConstant(Mask, MVT::i32)); 4768 // Do not add new nodes to DAG combiner worklist. 4769 DCI.CombineTo(N, Res, false); 4770 return SDValue(); 4771 } else if (ARM::isBitFieldInvertedMask(~Mask) && 4772 ARM::isBitFieldInvertedMask(Mask2) && 4773 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) { 4774 // The pack halfword instruction works better for masks that fit it, 4775 // so use that when it's available. 4776 if (Subtarget->hasT2ExtractPack() && 4777 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 4778 return SDValue(); 4779 // 2b 4780 unsigned lsb = CountTrailingZeros_32(Mask); 4781 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 4782 DAG.getConstant(lsb, MVT::i32)); 4783 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 4784 DAG.getConstant(Mask2, MVT::i32)); 4785 // Do not add new nodes to DAG combiner worklist. 4786 DCI.CombineTo(N, Res, false); 4787 return SDValue(); 4788 } 4789 } 4790 4791 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 4792 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 4793 ARM::isBitFieldInvertedMask(~Mask)) { 4794 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 4795 // where lsb(mask) == #shamt and masked bits of B are known zero. 4796 SDValue ShAmt = N00.getOperand(1); 4797 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 4798 unsigned LSB = CountTrailingZeros_32(Mask); 4799 if (ShAmtC != LSB) 4800 return SDValue(); 4801 4802 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 4803 DAG.getConstant(~Mask, MVT::i32)); 4804 4805 // Do not add new nodes to DAG combiner worklist. 4806 DCI.CombineTo(N, Res, false); 4807 } 4808 4809 return SDValue(); 4810} 4811 4812/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 4813/// C1 & C2 == C1. 
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & Mask2) == Mask2)
      return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
/// ISD::BUILD_VECTOR.
static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG) {
  // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
  // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
  // into a pair of GPRs, which is fine when the value is used as a scalar,
  // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
  if (N->getNumOperands() == 2)
    return PerformVMOVDRRCombine(N, DAG);

  return SDValue();
}

/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors. That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector.
Do that 4883 // transformation here: 4884 // shuffle(concat(v1, undef), concat(v2, undef)) -> 4885 // shuffle(concat(v1, v2), undef) 4886 SDValue Op0 = N->getOperand(0); 4887 SDValue Op1 = N->getOperand(1); 4888 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 4889 Op1.getOpcode() != ISD::CONCAT_VECTORS || 4890 Op0.getNumOperands() != 2 || 4891 Op1.getNumOperands() != 2) 4892 return SDValue(); 4893 SDValue Concat0Op1 = Op0.getOperand(1); 4894 SDValue Concat1Op1 = Op1.getOperand(1); 4895 if (Concat0Op1.getOpcode() != ISD::UNDEF || 4896 Concat1Op1.getOpcode() != ISD::UNDEF) 4897 return SDValue(); 4898 // Skip the transformation if any of the types are illegal. 4899 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4900 EVT VT = N->getValueType(0); 4901 if (!TLI.isTypeLegal(VT) || 4902 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 4903 !TLI.isTypeLegal(Concat1Op1.getValueType())) 4904 return SDValue(); 4905 4906 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 4907 Op0.getOperand(0), Op1.getOperand(0)); 4908 // Translate the shuffle mask. 4909 SmallVector<int, 16> NewMask; 4910 unsigned NumElts = VT.getVectorNumElements(); 4911 unsigned HalfElts = NumElts/2; 4912 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 4913 for (unsigned n = 0; n < NumElts; ++n) { 4914 int MaskElt = SVN->getMaskElt(n); 4915 int NewElt = -1; 4916 if (MaskElt < (int)HalfElts) 4917 NewElt = MaskElt; 4918 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 4919 NewElt = HalfElts + MaskElt - NumElts; 4920 NewMask.push_back(NewElt); 4921 } 4922 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 4923 DAG.getUNDEF(VT), NewMask.data()); 4924} 4925 4926/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 4927/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 4928/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 4929/// return true. 4930static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 4931 SelectionDAG &DAG = DCI.DAG; 4932 EVT VT = N->getValueType(0); 4933 // vldN-dup instructions only support 64-bit vectors for N > 1. 4934 if (!VT.is64BitVector()) 4935 return false; 4936 4937 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 4938 SDNode *VLD = N->getOperand(0).getNode(); 4939 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 4940 return false; 4941 unsigned NumVecs = 0; 4942 unsigned NewOpc = 0; 4943 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 4944 if (IntNo == Intrinsic::arm_neon_vld2lane) { 4945 NumVecs = 2; 4946 NewOpc = ARMISD::VLD2DUP; 4947 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 4948 NumVecs = 3; 4949 NewOpc = ARMISD::VLD3DUP; 4950 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 4951 NumVecs = 4; 4952 NewOpc = ARMISD::VLD4DUP; 4953 } else { 4954 return false; 4955 } 4956 4957 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 4958 // numbers match the load. 4959 unsigned VLDLaneNo = 4960 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 4961 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 4962 UI != UE; ++UI) { 4963 // Ignore uses of the chain result. 4964 if (UI.getUse().getResNo() == NumVecs) 4965 continue; 4966 SDNode *User = *UI; 4967 if (User->getOpcode() != ARMISD::VDUPLANE || 4968 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 4969 return false; 4970 } 4971 4972 // Create the vldN-dup node. 
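  // The replacement node keeps the original intrinsic's chain and pointer
  // operands and produces NumVecs vectors plus a chain; e.g. a vld2lane whose
  // results feed only VDUPLANEs becomes a VLD2DUP with result types
  // (VT, VT, Other).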
4973 EVT Tys[5]; 4974 unsigned n; 4975 for (n = 0; n < NumVecs; ++n) 4976 Tys[n] = VT; 4977 Tys[n] = MVT::Other; 4978 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 4979 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 4980 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 4981 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 4982 Ops, 2, VLDMemInt->getMemoryVT(), 4983 VLDMemInt->getMemOperand()); 4984 4985 // Update the uses. 4986 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 4987 UI != UE; ++UI) { 4988 unsigned ResNo = UI.getUse().getResNo(); 4989 // Ignore uses of the chain result. 4990 if (ResNo == NumVecs) 4991 continue; 4992 SDNode *User = *UI; 4993 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 4994 } 4995 4996 // Now the vldN-lane intrinsic is dead except for its chain result. 4997 // Update uses of the chain. 4998 std::vector<SDValue> VLDDupResults; 4999 for (unsigned n = 0; n < NumVecs; ++n) 5000 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 5001 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 5002 DCI.CombineTo(VLD, VLDDupResults); 5003 5004 return true; 5005} 5006 5007/// PerformVDUPLANECombine - Target-specific dag combine xforms for 5008/// ARMISD::VDUPLANE. 5009static SDValue PerformVDUPLANECombine(SDNode *N, 5010 TargetLowering::DAGCombinerInfo &DCI) { 5011 SDValue Op = N->getOperand(0); 5012 5013 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 5014 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 5015 if (CombineVLDDUP(N, DCI)) 5016 return SDValue(N, 0); 5017 5018 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 5019 // redundant. Ignore bit_converts for now; element sizes are checked below. 5020 while (Op.getOpcode() == ISD::BITCAST) 5021 Op = Op.getOperand(0); 5022 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 5023 return SDValue(); 5024 5025 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 5026 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 5027 // The canonical VMOV for a zero vector uses a 32-bit element size. 5028 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5029 unsigned EltBits; 5030 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 5031 EltSize = 8; 5032 EVT VT = N->getValueType(0); 5033 if (EltSize > VT.getVectorElementType().getSizeInBits()) 5034 return SDValue(); 5035 5036 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 5037} 5038 5039/// getVShiftImm - Check if this is a valid build_vector for the immediate 5040/// operand of a vector shift operation, where all the elements of the 5041/// build_vector must have the same constant integer value. 5042static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 5043 // Ignore bit_converts. 5044 while (Op.getOpcode() == ISD::BITCAST) 5045 Op = Op.getOperand(0); 5046 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5047 APInt SplatBits, SplatUndef; 5048 unsigned SplatBitSize; 5049 bool HasAnyUndefs; 5050 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 5051 HasAnyUndefs, ElementBits) || 5052 SplatBitSize > ElementBits) 5053 return false; 5054 Cnt = SplatBits.getSExtValue(); 5055 return true; 5056} 5057 5058/// isVShiftLImm - Check if this is a valid build_vector for the immediate 5059/// operand of a vector shift left operation. 
That value must be in the range: 5060/// 0 <= Value < ElementBits for a left shift; or 5061/// 0 <= Value <= ElementBits for a long left shift. 5062static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 5063 assert(VT.isVector() && "vector shift count is not a vector type"); 5064 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5065 if (! getVShiftImm(Op, ElementBits, Cnt)) 5066 return false; 5067 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 5068} 5069 5070/// isVShiftRImm - Check if this is a valid build_vector for the immediate 5071/// operand of a vector shift right operation. For a shift opcode, the value 5072/// is positive, but for an intrinsic the value count must be negative. The 5073/// absolute value must be in the range: 5074/// 1 <= |Value| <= ElementBits for a right shift; or 5075/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 5076static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 5077 int64_t &Cnt) { 5078 assert(VT.isVector() && "vector shift count is not a vector type"); 5079 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5080 if (! getVShiftImm(Op, ElementBits, Cnt)) 5081 return false; 5082 if (isIntrinsic) 5083 Cnt = -Cnt; 5084 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 5085} 5086 5087/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 5088static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 5089 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 5090 switch (IntNo) { 5091 default: 5092 // Don't do anything for most intrinsics. 5093 break; 5094 5095 // Vector shifts: check for immediate versions and lower them. 5096 // Note: This is done during DAG combining instead of DAG legalizing because 5097 // the build_vectors for 64-bit vector element shift counts are generally 5098 // not legal, and it is hard to see their values after they get legalized to 5099 // loads from a constant pool. 5100 case Intrinsic::arm_neon_vshifts: 5101 case Intrinsic::arm_neon_vshiftu: 5102 case Intrinsic::arm_neon_vshiftls: 5103 case Intrinsic::arm_neon_vshiftlu: 5104 case Intrinsic::arm_neon_vshiftn: 5105 case Intrinsic::arm_neon_vrshifts: 5106 case Intrinsic::arm_neon_vrshiftu: 5107 case Intrinsic::arm_neon_vrshiftn: 5108 case Intrinsic::arm_neon_vqshifts: 5109 case Intrinsic::arm_neon_vqshiftu: 5110 case Intrinsic::arm_neon_vqshiftsu: 5111 case Intrinsic::arm_neon_vqshiftns: 5112 case Intrinsic::arm_neon_vqshiftnu: 5113 case Intrinsic::arm_neon_vqshiftnsu: 5114 case Intrinsic::arm_neon_vqrshiftns: 5115 case Intrinsic::arm_neon_vqrshiftnu: 5116 case Intrinsic::arm_neon_vqrshiftnsu: { 5117 EVT VT = N->getOperand(1).getValueType(); 5118 int64_t Cnt; 5119 unsigned VShiftOpc = 0; 5120 5121 switch (IntNo) { 5122 case Intrinsic::arm_neon_vshifts: 5123 case Intrinsic::arm_neon_vshiftu: 5124 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 5125 VShiftOpc = ARMISD::VSHL; 5126 break; 5127 } 5128 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 5129 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
5130 ARMISD::VSHRs : ARMISD::VSHRu); 5131 break; 5132 } 5133 return SDValue(); 5134 5135 case Intrinsic::arm_neon_vshiftls: 5136 case Intrinsic::arm_neon_vshiftlu: 5137 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 5138 break; 5139 llvm_unreachable("invalid shift count for vshll intrinsic"); 5140 5141 case Intrinsic::arm_neon_vrshifts: 5142 case Intrinsic::arm_neon_vrshiftu: 5143 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 5144 break; 5145 return SDValue(); 5146 5147 case Intrinsic::arm_neon_vqshifts: 5148 case Intrinsic::arm_neon_vqshiftu: 5149 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5150 break; 5151 return SDValue(); 5152 5153 case Intrinsic::arm_neon_vqshiftsu: 5154 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5155 break; 5156 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 5157 5158 case Intrinsic::arm_neon_vshiftn: 5159 case Intrinsic::arm_neon_vrshiftn: 5160 case Intrinsic::arm_neon_vqshiftns: 5161 case Intrinsic::arm_neon_vqshiftnu: 5162 case Intrinsic::arm_neon_vqshiftnsu: 5163 case Intrinsic::arm_neon_vqrshiftns: 5164 case Intrinsic::arm_neon_vqrshiftnu: 5165 case Intrinsic::arm_neon_vqrshiftnsu: 5166 // Narrowing shifts require an immediate right shift. 5167 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 5168 break; 5169 llvm_unreachable("invalid shift count for narrowing vector shift " 5170 "intrinsic"); 5171 5172 default: 5173 llvm_unreachable("unhandled vector shift"); 5174 } 5175 5176 switch (IntNo) { 5177 case Intrinsic::arm_neon_vshifts: 5178 case Intrinsic::arm_neon_vshiftu: 5179 // Opcode already set above. 5180 break; 5181 case Intrinsic::arm_neon_vshiftls: 5182 case Intrinsic::arm_neon_vshiftlu: 5183 if (Cnt == VT.getVectorElementType().getSizeInBits()) 5184 VShiftOpc = ARMISD::VSHLLi; 5185 else 5186 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
5187 ARMISD::VSHLLs : ARMISD::VSHLLu); 5188 break; 5189 case Intrinsic::arm_neon_vshiftn: 5190 VShiftOpc = ARMISD::VSHRN; break; 5191 case Intrinsic::arm_neon_vrshifts: 5192 VShiftOpc = ARMISD::VRSHRs; break; 5193 case Intrinsic::arm_neon_vrshiftu: 5194 VShiftOpc = ARMISD::VRSHRu; break; 5195 case Intrinsic::arm_neon_vrshiftn: 5196 VShiftOpc = ARMISD::VRSHRN; break; 5197 case Intrinsic::arm_neon_vqshifts: 5198 VShiftOpc = ARMISD::VQSHLs; break; 5199 case Intrinsic::arm_neon_vqshiftu: 5200 VShiftOpc = ARMISD::VQSHLu; break; 5201 case Intrinsic::arm_neon_vqshiftsu: 5202 VShiftOpc = ARMISD::VQSHLsu; break; 5203 case Intrinsic::arm_neon_vqshiftns: 5204 VShiftOpc = ARMISD::VQSHRNs; break; 5205 case Intrinsic::arm_neon_vqshiftnu: 5206 VShiftOpc = ARMISD::VQSHRNu; break; 5207 case Intrinsic::arm_neon_vqshiftnsu: 5208 VShiftOpc = ARMISD::VQSHRNsu; break; 5209 case Intrinsic::arm_neon_vqrshiftns: 5210 VShiftOpc = ARMISD::VQRSHRNs; break; 5211 case Intrinsic::arm_neon_vqrshiftnu: 5212 VShiftOpc = ARMISD::VQRSHRNu; break; 5213 case Intrinsic::arm_neon_vqrshiftnsu: 5214 VShiftOpc = ARMISD::VQRSHRNsu; break; 5215 } 5216 5217 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5218 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 5219 } 5220 5221 case Intrinsic::arm_neon_vshiftins: { 5222 EVT VT = N->getOperand(1).getValueType(); 5223 int64_t Cnt; 5224 unsigned VShiftOpc = 0; 5225 5226 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 5227 VShiftOpc = ARMISD::VSLI; 5228 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 5229 VShiftOpc = ARMISD::VSRI; 5230 else { 5231 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 5232 } 5233 5234 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5235 N->getOperand(1), N->getOperand(2), 5236 DAG.getConstant(Cnt, MVT::i32)); 5237 } 5238 5239 case Intrinsic::arm_neon_vqrshifts: 5240 case Intrinsic::arm_neon_vqrshiftu: 5241 // No immediate versions of these to check for. 5242 break; 5243 } 5244 5245 return SDValue(); 5246} 5247 5248/// PerformShiftCombine - Checks for immediate versions of vector shifts and 5249/// lowers them. As with the vector shift intrinsics, this is done during DAG 5250/// combining instead of DAG legalizing because the build_vectors for 64-bit 5251/// vector element shift counts are generally not legal, and it is hard to see 5252/// their values after they get legalized to loads from a constant pool. 5253static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 5254 const ARMSubtarget *ST) { 5255 EVT VT = N->getValueType(0); 5256 5257 // Nothing to be done for scalar shifts. 5258 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5259 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 5260 return SDValue(); 5261 5262 assert(ST->hasNEON() && "unexpected vector shift"); 5263 int64_t Cnt; 5264 5265 switch (N->getOpcode()) { 5266 default: llvm_unreachable("unexpected shift opcode"); 5267 5268 case ISD::SHL: 5269 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 5270 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 5271 DAG.getConstant(Cnt, MVT::i32)); 5272 break; 5273 5274 case ISD::SRA: 5275 case ISD::SRL: 5276 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 5277 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
5278 ARMISD::VSHRs : ARMISD::VSHRu); 5279 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 5280 DAG.getConstant(Cnt, MVT::i32)); 5281 } 5282 } 5283 return SDValue(); 5284} 5285 5286/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 5287/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 5288static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 5289 const ARMSubtarget *ST) { 5290 SDValue N0 = N->getOperand(0); 5291 5292 // Check for sign- and zero-extensions of vector extract operations of 8- 5293 // and 16-bit vector elements. NEON supports these directly. They are 5294 // handled during DAG combining because type legalization will promote them 5295 // to 32-bit types and it is messy to recognize the operations after that. 5296 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 5297 SDValue Vec = N0.getOperand(0); 5298 SDValue Lane = N0.getOperand(1); 5299 EVT VT = N->getValueType(0); 5300 EVT EltVT = N0.getValueType(); 5301 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5302 5303 if (VT == MVT::i32 && 5304 (EltVT == MVT::i8 || EltVT == MVT::i16) && 5305 TLI.isTypeLegal(Vec.getValueType()) && 5306 isa<ConstantSDNode>(Lane)) { 5307 5308 unsigned Opc = 0; 5309 switch (N->getOpcode()) { 5310 default: llvm_unreachable("unexpected opcode"); 5311 case ISD::SIGN_EXTEND: 5312 Opc = ARMISD::VGETLANEs; 5313 break; 5314 case ISD::ZERO_EXTEND: 5315 case ISD::ANY_EXTEND: 5316 Opc = ARMISD::VGETLANEu; 5317 break; 5318 } 5319 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 5320 } 5321 } 5322 5323 return SDValue(); 5324} 5325 5326/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 5327/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 5328static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 5329 const ARMSubtarget *ST) { 5330 // If the target supports NEON, try to use vmax/vmin instructions for f32 5331 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 5332 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 5333 // a NaN; only do the transformation when it matches that behavior. 5334 5335 // For now only do this when using NEON for FP operations; if using VFP, it 5336 // is not obvious that the benefit outweighs the cost of switching to the 5337 // NEON pipeline. 5338 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 5339 N->getValueType(0) != MVT::f32) 5340 return SDValue(); 5341 5342 SDValue CondLHS = N->getOperand(0); 5343 SDValue CondRHS = N->getOperand(1); 5344 SDValue LHS = N->getOperand(2); 5345 SDValue RHS = N->getOperand(3); 5346 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 5347 5348 unsigned Opcode = 0; 5349 bool IsReversed; 5350 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 5351 IsReversed = false; // x CC y ? x : y 5352 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 5353 IsReversed = true ; // x CC y ? y : x 5354 } else { 5355 return SDValue(); 5356 } 5357 5358 bool IsUnordered; 5359 switch (CC) { 5360 default: break; 5361 case ISD::SETOLT: 5362 case ISD::SETOLE: 5363 case ISD::SETLT: 5364 case ISD::SETLE: 5365 case ISD::SETULT: 5366 case ISD::SETULE: 5367 // If LHS is NaN, an ordered comparison will be false and the result will 5368 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 5369 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
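    // Example: "x < y ? x : y" (SETOLT with IsReversed == false) maps to
    // ARMISD::FMIN below, provided LHS is known not to be NaN; the
    // signed-zero restriction further down only applies to the <= forms.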
5370 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 5371 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5372 break; 5373 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 5374 // will return -0, so vmin can only be used for unsafe math or if one of 5375 // the operands is known to be nonzero. 5376 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 5377 !UnsafeFPMath && 5378 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5379 break; 5380 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 5381 break; 5382 5383 case ISD::SETOGT: 5384 case ISD::SETOGE: 5385 case ISD::SETGT: 5386 case ISD::SETGE: 5387 case ISD::SETUGT: 5388 case ISD::SETUGE: 5389 // If LHS is NaN, an ordered comparison will be false and the result will 5390 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 5391 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 5392 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 5393 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5394 break; 5395 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 5396 // will return +0, so vmax can only be used for unsafe math or if one of 5397 // the operands is known to be nonzero. 5398 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 5399 !UnsafeFPMath && 5400 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5401 break; 5402 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 5403 break; 5404 } 5405 5406 if (!Opcode) 5407 return SDValue(); 5408 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 5409} 5410 5411SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 5412 DAGCombinerInfo &DCI) const { 5413 switch (N->getOpcode()) { 5414 default: break; 5415 case ISD::ADD: return PerformADDCombine(N, DCI); 5416 case ISD::SUB: return PerformSUBCombine(N, DCI); 5417 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 5418 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 5419 case ISD::AND: return PerformANDCombine(N, DCI); 5420 case ARMISD::BFI: return PerformBFICombine(N, DCI); 5421 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 5422 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 5423 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI.DAG); 5424 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 5425 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 5426 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 5427 case ISD::SHL: 5428 case ISD::SRA: 5429 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 5430 case ISD::SIGN_EXTEND: 5431 case ISD::ZERO_EXTEND: 5432 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 5433 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 5434 } 5435 return SDValue(); 5436} 5437 5438bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 5439 if (!Subtarget->allowsUnalignedMem()) 5440 return false; 5441 5442 switch (VT.getSimpleVT().SimpleTy) { 5443 default: 5444 return false; 5445 case MVT::i8: 5446 case MVT::i16: 5447 case MVT::i32: 5448 return true; 5449 // FIXME: VLD1 etc with standard alignment is legal. 
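  // Vector types currently fall through to the default case and report false,
  // even though (per the FIXME above) VLD1/VST1 with the standard alignment
  // would be acceptable.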
5450 } 5451} 5452 5453static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 5454 if (V < 0) 5455 return false; 5456 5457 unsigned Scale = 1; 5458 switch (VT.getSimpleVT().SimpleTy) { 5459 default: return false; 5460 case MVT::i1: 5461 case MVT::i8: 5462 // Scale == 1; 5463 break; 5464 case MVT::i16: 5465 // Scale == 2; 5466 Scale = 2; 5467 break; 5468 case MVT::i32: 5469 // Scale == 4; 5470 Scale = 4; 5471 break; 5472 } 5473 5474 if ((V & (Scale - 1)) != 0) 5475 return false; 5476 V /= Scale; 5477 return V == (V & ((1LL << 5) - 1)); 5478} 5479 5480static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 5481 const ARMSubtarget *Subtarget) { 5482 bool isNeg = false; 5483 if (V < 0) { 5484 isNeg = true; 5485 V = - V; 5486 } 5487 5488 switch (VT.getSimpleVT().SimpleTy) { 5489 default: return false; 5490 case MVT::i1: 5491 case MVT::i8: 5492 case MVT::i16: 5493 case MVT::i32: 5494 // + imm12 or - imm8 5495 if (isNeg) 5496 return V == (V & ((1LL << 8) - 1)); 5497 return V == (V & ((1LL << 12) - 1)); 5498 case MVT::f32: 5499 case MVT::f64: 5500 // Same as ARM mode. FIXME: NEON? 5501 if (!Subtarget->hasVFP2()) 5502 return false; 5503 if ((V & 3) != 0) 5504 return false; 5505 V >>= 2; 5506 return V == (V & ((1LL << 8) - 1)); 5507 } 5508} 5509 5510/// isLegalAddressImmediate - Return true if the integer value can be used 5511/// as the offset of the target addressing mode for load / store of the 5512/// given type. 5513static bool isLegalAddressImmediate(int64_t V, EVT VT, 5514 const ARMSubtarget *Subtarget) { 5515 if (V == 0) 5516 return true; 5517 5518 if (!VT.isSimple()) 5519 return false; 5520 5521 if (Subtarget->isThumb1Only()) 5522 return isLegalT1AddressImmediate(V, VT); 5523 else if (Subtarget->isThumb2()) 5524 return isLegalT2AddressImmediate(V, VT, Subtarget); 5525 5526 // ARM mode. 5527 if (V < 0) 5528 V = - V; 5529 switch (VT.getSimpleVT().SimpleTy) { 5530 default: return false; 5531 case MVT::i1: 5532 case MVT::i8: 5533 case MVT::i32: 5534 // +- imm12 5535 return V == (V & ((1LL << 12) - 1)); 5536 case MVT::i16: 5537 // +- imm8 5538 return V == (V & ((1LL << 8) - 1)); 5539 case MVT::f32: 5540 case MVT::f64: 5541 if (!Subtarget->hasVFP2()) // FIXME: NEON? 5542 return false; 5543 if ((V & 3) != 0) 5544 return false; 5545 V >>= 2; 5546 return V == (V & ((1LL << 8) - 1)); 5547 } 5548} 5549 5550bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 5551 EVT VT) const { 5552 int Scale = AM.Scale; 5553 if (Scale < 0) 5554 return false; 5555 5556 switch (VT.getSimpleVT().SimpleTy) { 5557 default: return false; 5558 case MVT::i1: 5559 case MVT::i8: 5560 case MVT::i16: 5561 case MVT::i32: 5562 if (Scale == 1) 5563 return true; 5564 // r + r << imm 5565 Scale = Scale & ~1; 5566 return Scale == 2 || Scale == 4 || Scale == 8; 5567 case MVT::i64: 5568 // r + r 5569 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5570 return true; 5571 return false; 5572 case MVT::isVoid: 5573 // Note, we allow "void" uses (basically, uses that aren't loads or 5574 // stores), because arm allows folding a scale into many arithmetic 5575 // operations. This should be made more precise and revisited later. 5576 5577 // Allow r << imm, but the imm has to be a multiple of two. 5578 if (Scale & 1) return false; 5579 return isPowerOf2_32(Scale); 5580 } 5581} 5582 5583/// isLegalAddressingMode - Return true if the addressing mode represented 5584/// by AM is legal for this target, for a load/store of the specified type. 
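/// For example, in ARM mode an i32 access at [r0, r1, lsl #2] is accepted
/// (register plus scaled register, no immediate), while combining a scaled
/// register with a nonzero immediate offset is rejected because
/// reg + reg*scale + imm is not a supported addressing mode.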
5585bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5586 const Type *Ty) const { 5587 EVT VT = getValueType(Ty, true); 5588 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 5589 return false; 5590 5591 // Can never fold addr of global into load/store. 5592 if (AM.BaseGV) 5593 return false; 5594 5595 switch (AM.Scale) { 5596 case 0: // no scale reg, must be "r+i" or "r", or "i". 5597 break; 5598 case 1: 5599 if (Subtarget->isThumb1Only()) 5600 return false; 5601 // FALL THROUGH. 5602 default: 5603 // ARM doesn't support any R+R*scale+imm addr modes. 5604 if (AM.BaseOffs) 5605 return false; 5606 5607 if (!VT.isSimple()) 5608 return false; 5609 5610 if (Subtarget->isThumb2()) 5611 return isLegalT2ScaledAddressingMode(AM, VT); 5612 5613 int Scale = AM.Scale; 5614 switch (VT.getSimpleVT().SimpleTy) { 5615 default: return false; 5616 case MVT::i1: 5617 case MVT::i8: 5618 case MVT::i32: 5619 if (Scale < 0) Scale = -Scale; 5620 if (Scale == 1) 5621 return true; 5622 // r + r << imm 5623 return isPowerOf2_32(Scale & ~1); 5624 case MVT::i16: 5625 case MVT::i64: 5626 // r + r 5627 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5628 return true; 5629 return false; 5630 5631 case MVT::isVoid: 5632 // Note, we allow "void" uses (basically, uses that aren't loads or 5633 // stores), because arm allows folding a scale into many arithmetic 5634 // operations. This should be made more precise and revisited later. 5635 5636 // Allow r << imm, but the imm has to be a multiple of two. 5637 if (Scale & 1) return false; 5638 return isPowerOf2_32(Scale); 5639 } 5640 break; 5641 } 5642 return true; 5643} 5644 5645/// isLegalICmpImmediate - Return true if the specified immediate is legal 5646/// icmp immediate, that is the target has icmp instructions which can compare 5647/// a register against the immediate without having to materialize the 5648/// immediate into a register. 
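/// For example, ARM mode accepts 0xff0000 (an 8-bit value rotated into
/// position) but not 0xffff00, and Thumb1 only accepts immediates in the
/// range 0-255.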
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  return Imm >= 0 && Imm <= 255;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
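/// For example, a 32-bit load whose address is (add rN, #8) can be turned
/// into the pre-indexed form "ldr rD, [rN, #8]!", reported here as
/// ISD::PRE_INC with Offset equal to the constant 8.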
5743bool 5744ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 5745 SDValue &Offset, 5746 ISD::MemIndexedMode &AM, 5747 SelectionDAG &DAG) const { 5748 if (Subtarget->isThumb1Only()) 5749 return false; 5750 5751 EVT VT; 5752 SDValue Ptr; 5753 bool isSEXTLoad = false; 5754 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5755 Ptr = LD->getBasePtr(); 5756 VT = LD->getMemoryVT(); 5757 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5758 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5759 Ptr = ST->getBasePtr(); 5760 VT = ST->getMemoryVT(); 5761 } else 5762 return false; 5763 5764 bool isInc; 5765 bool isLegal = false; 5766 if (Subtarget->isThumb2()) 5767 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5768 Offset, isInc, DAG); 5769 else 5770 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5771 Offset, isInc, DAG); 5772 if (!isLegal) 5773 return false; 5774 5775 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 5776 return true; 5777} 5778 5779/// getPostIndexedAddressParts - returns true by value, base pointer and 5780/// offset pointer and addressing mode by reference if this node can be 5781/// combined with a load / store to form a post-indexed load / store. 5782bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 5783 SDValue &Base, 5784 SDValue &Offset, 5785 ISD::MemIndexedMode &AM, 5786 SelectionDAG &DAG) const { 5787 if (Subtarget->isThumb1Only()) 5788 return false; 5789 5790 EVT VT; 5791 SDValue Ptr; 5792 bool isSEXTLoad = false; 5793 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5794 VT = LD->getMemoryVT(); 5795 Ptr = LD->getBasePtr(); 5796 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5797 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5798 VT = ST->getMemoryVT(); 5799 Ptr = ST->getBasePtr(); 5800 } else 5801 return false; 5802 5803 bool isInc; 5804 bool isLegal = false; 5805 if (Subtarget->isThumb2()) 5806 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5807 isInc, DAG); 5808 else 5809 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5810 isInc, DAG); 5811 if (!isLegal) 5812 return false; 5813 5814 if (Ptr != Base) { 5815 // Swap base ptr and offset to catch more post-index load / store when 5816 // it's legal. In Thumb2 mode, offset must be an immediate. 5817 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 5818 !Subtarget->isThumb2()) 5819 std::swap(Base, Offset); 5820 5821 // Post-indexed load / store update the base pointer. 5822 if (Ptr != Base) 5823 return false; 5824 } 5825 5826 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 5827 return true; 5828} 5829 5830void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5831 const APInt &Mask, 5832 APInt &KnownZero, 5833 APInt &KnownOne, 5834 const SelectionDAG &DAG, 5835 unsigned Depth) const { 5836 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5837 switch (Op.getOpcode()) { 5838 default: break; 5839 case ARMISD::CMOV: { 5840 // Bits are known zero/one if known on the LHS and RHS. 
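    // A CMOV yields one of its first two operands, so a bit is known only if
    // it is known to the same value in both; the intersection below computes
    // exactly that.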
5841 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 5842 if (KnownZero == 0 && KnownOne == 0) return; 5843 5844 APInt KnownZeroRHS, KnownOneRHS; 5845 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 5846 KnownZeroRHS, KnownOneRHS, Depth+1); 5847 KnownZero &= KnownZeroRHS; 5848 KnownOne &= KnownOneRHS; 5849 return; 5850 } 5851 } 5852} 5853 5854//===----------------------------------------------------------------------===// 5855// ARM Inline Assembly Support 5856//===----------------------------------------------------------------------===// 5857 5858/// getConstraintType - Given a constraint letter, return the type of 5859/// constraint it is for this target. 5860ARMTargetLowering::ConstraintType 5861ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 5862 if (Constraint.size() == 1) { 5863 switch (Constraint[0]) { 5864 default: break; 5865 case 'l': return C_RegisterClass; 5866 case 'w': return C_RegisterClass; 5867 } 5868 } 5869 return TargetLowering::getConstraintType(Constraint); 5870} 5871 5872/// Examine constraint type and operand type and determine a weight value. 5873/// This object must already have been set up with the operand type 5874/// and the current alternative constraint selected. 5875TargetLowering::ConstraintWeight 5876ARMTargetLowering::getSingleConstraintMatchWeight( 5877 AsmOperandInfo &info, const char *constraint) const { 5878 ConstraintWeight weight = CW_Invalid; 5879 Value *CallOperandVal = info.CallOperandVal; 5880 // If we don't have a value, we can't do a match, 5881 // but allow it at the lowest weight. 5882 if (CallOperandVal == NULL) 5883 return CW_Default; 5884 const Type *type = CallOperandVal->getType(); 5885 // Look at the constraint type. 5886 switch (*constraint) { 5887 default: 5888 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 5889 break; 5890 case 'l': 5891 if (type->isIntegerTy()) { 5892 if (Subtarget->isThumb()) 5893 weight = CW_SpecificReg; 5894 else 5895 weight = CW_Register; 5896 } 5897 break; 5898 case 'w': 5899 if (type->isFloatingPointTy()) 5900 weight = CW_Register; 5901 break; 5902 } 5903 return weight; 5904} 5905 5906std::pair<unsigned, const TargetRegisterClass*> 5907ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5908 EVT VT) const { 5909 if (Constraint.size() == 1) { 5910 // GCC ARM Constraint Letters 5911 switch (Constraint[0]) { 5912 case 'l': 5913 if (Subtarget->isThumb()) 5914 return std::make_pair(0U, ARM::tGPRRegisterClass); 5915 else 5916 return std::make_pair(0U, ARM::GPRRegisterClass); 5917 case 'r': 5918 return std::make_pair(0U, ARM::GPRRegisterClass); 5919 case 'w': 5920 if (VT == MVT::f32) 5921 return std::make_pair(0U, ARM::SPRRegisterClass); 5922 if (VT.getSizeInBits() == 64) 5923 return std::make_pair(0U, ARM::DPRRegisterClass); 5924 if (VT.getSizeInBits() == 128) 5925 return std::make_pair(0U, ARM::QPRRegisterClass); 5926 break; 5927 } 5928 } 5929 if (StringRef("{cc}").equals_lower(Constraint)) 5930 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 5931 5932 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5933} 5934 5935std::vector<unsigned> ARMTargetLowering:: 5936getRegClassForInlineAsmConstraint(const std::string &Constraint, 5937 EVT VT) const { 5938 if (Constraint.size() != 1) 5939 return std::vector<unsigned>(); 5940 5941 switch (Constraint[0]) { // GCC ARM Constraint Letters 5942 default: break; 5943 case 'l': 5944 return make_vector<unsigned>(ARM::R0, ARM::R1, 
ARM::R2, ARM::R3, 5945 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5946 0); 5947 case 'r': 5948 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 5949 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5950 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 5951 ARM::R12, ARM::LR, 0); 5952 case 'w': 5953 if (VT == MVT::f32) 5954 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 5955 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 5956 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 5957 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 5958 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 5959 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 5960 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 5961 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 5962 if (VT.getSizeInBits() == 64) 5963 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 5964 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 5965 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 5966 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 5967 if (VT.getSizeInBits() == 128) 5968 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 5969 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 5970 break; 5971 } 5972 5973 return std::vector<unsigned>(); 5974} 5975 5976/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5977/// vector. If it is invalid, don't add anything to Ops. 5978void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5979 char Constraint, 5980 std::vector<SDValue>&Ops, 5981 SelectionDAG &DAG) const { 5982 SDValue Result(0, 0); 5983 5984 switch (Constraint) { 5985 default: break; 5986 case 'I': case 'J': case 'K': case 'L': 5987 case 'M': case 'N': case 'O': 5988 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 5989 if (!C) 5990 return; 5991 5992 int64_t CVal64 = C->getSExtValue(); 5993 int CVal = (int) CVal64; 5994 // None of these constraints allow values larger than 32 bits. Check 5995 // that the value fits in an int. 5996 if (CVal != CVal64) 5997 return; 5998 5999 switch (Constraint) { 6000 case 'I': 6001 if (Subtarget->isThumb1Only()) { 6002 // This must be a constant between 0 and 255, for ADD 6003 // immediates. 6004 if (CVal >= 0 && CVal <= 255) 6005 break; 6006 } else if (Subtarget->isThumb2()) { 6007 // A constant that can be used as an immediate value in a 6008 // data-processing instruction. 6009 if (ARM_AM::getT2SOImmVal(CVal) != -1) 6010 break; 6011 } else { 6012 // A constant that can be used as an immediate value in a 6013 // data-processing instruction. 6014 if (ARM_AM::getSOImmVal(CVal) != -1) 6015 break; 6016 } 6017 return; 6018 6019 case 'J': 6020 if (Subtarget->isThumb()) { // FIXME thumb2 6021 // This must be a constant between -255 and -1, for negated ADD 6022 // immediates. This can be used in GCC with an "n" modifier that 6023 // prints the negated value, for use with SUB instructions. It is 6024 // not useful otherwise but is implemented for compatibility. 6025 if (CVal >= -255 && CVal <= -1) 6026 break; 6027 } else { 6028 // This must be a constant between -4095 and 4095. It is not clear 6029 // what this constraint is intended for. Implemented for 6030 // compatibility with GCC. 6031 if (CVal >= -4095 && CVal <= 4095) 6032 break; 6033 } 6034 return; 6035 6036 case 'K': 6037 if (Subtarget->isThumb1Only()) { 6038 // A 32-bit value where only one byte has a nonzero value. Exclude 6039 // zero to match GCC. This constraint is used by GCC internally for 6040 // constants that can be loaded with a move/shift combination. 6041 // It is not useful otherwise but is implemented for compatibility. 
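        // E.g. 0x00ab0000 (0xab shifted left by 16) is the sort of constant
        // accepted here, matching the move/shift description above.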
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32. This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
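  // Returning false keeps a global address plus constant offset (e.g. &g + 4)
  // as an explicit add rather than folding the offset into the
  // TargetGlobalAddress node.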
  return false;
}

int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return 0;
  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return 0;
  }
  return 1;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
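    // The intrinsic returns either one vector or a struct of vectors, so
    // dividing its allocation size by 8 yields the number of 64-bit lanes
    // touched; e.g. a vld2 returning {<4 x i32>, <4 x i32>} (32 bytes) is
    // modelled as a v4i64 access. (The example types are illustrative only.)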
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
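
// Note on the VFP immediate encoding produced by ARM::getVFPf32Imm and
// ARM::getVFPf64Imm above: the 8-bit value a:b:c:d:e:f:g:h represents
// (-1)^a * 2^(UInt(NOT(b):c:d) - 3) * (16 + UInt(e:f:g:h)) / 16, mirroring
// the VFPExpandImm pseudocode in the ARM architecture manual. For example,
// 1.0f (0x3F800000) has Sign = 0, Exp = 0 and a zero 4-bit mantissa and
// encodes to 0x70; 0.5f encodes to 0x60; a value such as 1.0f/3.0f has a
// mantissa that does not fit in 4 bits and yields -1. A minimal decoding
// sketch, kept here as a comment because it is illustrative only and the
// helper name is hypothetical:
//
//   float decodeVFPf32Imm(unsigned Imm8) {
//     unsigned Sign = (Imm8 >> 7) & 1;
//     int Exp = (int)(((Imm8 >> 4) & 7) ^ 4) - 3;  // UInt(NOT(b):c:d) - 3
//     float Val = (16.0f + (Imm8 & 0xf)) / 16.0f;  // (16 + UInt(e:f:g:h)) / 16
//     Val = ldexpf(Val, Exp);
//     return Sign ? -Val : Val;
//   }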