ARMISelLowering.cpp revision fc8475bde993cc0fa6101427e73e8a9cf7d1c3a4
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

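// The cl::opt flags below are hidden developer knobs rather than stable
// user-facing options; they can be toggled on the llc command line, e.g.
// "llc -arm-tail-calls" (an illustrative invocation, not part of this file).
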
// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }
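
  // Effect of the promotions above: e.g. a v8i8 AND is bitcast to v2i32 and
  // selected as the v2i32 operation, since only the bit pattern matters for
  // bitwise ops.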

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
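
      // The setCmpLibcallCC calls record how to interpret each helper's
      // return value: e.g. __eqdf2vfp returns nonzero iff its operands are
      // equal, so the OEQ result is "libcall result SETNE 0". The O_F32/O_F64
      // entries reuse the "unordered" helpers with the inverted (SETEQ) test.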

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);
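
    // Note: the RTABI helpers pass and return values in core registers
    // regardless of the float ABI in use, which is why they are pinned to the
    // base (soft-float) AAPCS calling convention here.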
"__aeabi_fdiv"); 286 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 287 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 288 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 289 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 290 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 291 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 292 293 // Single-precision floating-point comparison helper functions 294 // RTABI chapter 4.1.2, Table 5 295 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 296 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 297 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 298 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 299 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 300 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 301 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 302 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 303 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 304 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 305 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 306 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 307 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 308 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 309 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 310 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 311 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 313 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 314 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 315 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 316 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 317 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 318 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 319 320 // Floating-point to integer conversions. 321 // RTABI chapter 4.1.2, Table 6 322 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 323 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 324 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 325 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 326 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 327 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 328 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 329 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 330 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 331 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 332 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 333 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 334 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 335 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 338 339 // Conversions between floating types. 340 // RTABI chapter 4.1.2, Table 7 341 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 342 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 343 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 344 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 345 346 // Integer to floating-point conversions. 

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64,  "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64,  "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64,  "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }
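
  // NEON vector types live in the 64-bit D registers (DPR) or the 128-bit Q
  // registers (QPR); the helpers below register one class per legal type.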

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1,  Legal);
      setIndexedLoadAction(im, MVT::i8,  Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }
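
  // "Indexed" here means pre/post-incremented addressing, e.g.
  //   ldr r0, [r1, #4]!   ; pre-indexed: r1 += 4, then load
  //   ldr r0, [r1], #4    ; post-indexed: load, then r1 += 4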

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL,       MVT::i64, Expand);
    setOperationAction(ISD::MULHU,     MVT::i32, Expand);
    setOperationAction(ISD::MULHS,     MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,   MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
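
  // Expanding SDIV/UDIV on targets without a hardware divider turns each i32
  // division into a call to the helpers registered above, e.g. __aeabi_idiv
  // under AAPCS (or libgcc's __divsi3 with the default libcall names).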

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);
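
  // Expanded atomics become __sync_* libcalls (e.g. __sync_fetch_and_add_4),
  // which are expected to provide any required locking themselves.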

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
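
  // With the limit set to 1, the inline expansion of memcpy may use at most
  // one store; anything larger is emitted as a call to memcpy.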

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const{
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::VMULLs:        return "ARMISD::VMULLs";
  case ARMISD::VMULLu:        return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX:          return "ARMISD::FMAX";
  case ARMISD::FMIN:          return "ARMISD::FMIN";
  case ARMISD::BFI:           return "ARMISD::BFI";
  case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
  case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
  }
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

// FIXME: Move to RegInfo
unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
//                      Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
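
// When CondCode2 is not AL, the predicate cannot be expressed as a single ARM
// condition after a VFP compare: e.g. SETONE ("ordered and not equal") holds
// if either MI or GT holds, so callers emit two predicated operations.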

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
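
// In the soft-float ABIs an f64 argument travels in two core registers (or
// partly on the stack), so PassF64ArgInRegs splits it with VMOVRRD and lets
// the two halves be assigned independently.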

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                       Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
1356 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1357 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1358 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1359 ARMPCLabelIndex, 1360 ARMCP::CPValue, 4); 1361 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1362 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1363 Callee = DAG.getLoad(getPointerTy(), dl, 1364 DAG.getEntryNode(), CPAddr, 1365 MachinePointerInfo::getConstantPool(), 1366 false, false, 0); 1367 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1368 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1369 getPointerTy(), Callee, PICLabel); 1370 } else { 1371 // On ELF targets for PIC code, direct calls should go through the PLT 1372 unsigned OpFlags = 0; 1373 if (Subtarget->isTargetELF() && 1374 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1375 OpFlags = ARMII::MO_PLT; 1376 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1377 } 1378 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1379 isDirect = true; 1380 bool isStub = Subtarget->isTargetDarwin() && 1381 getTargetMachine().getRelocationModel() != Reloc::Static; 1382 isARMFunc = !Subtarget->isThumb() || isStub; 1383 // tBX takes a register source operand. 1384 const char *Sym = S->getSymbol(); 1385 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1386 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1387 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1388 Sym, ARMPCLabelIndex, 4); 1389 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1390 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1391 Callee = DAG.getLoad(getPointerTy(), dl, 1392 DAG.getEntryNode(), CPAddr, 1393 MachinePointerInfo::getConstantPool(), 1394 false, false, 0); 1395 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1396 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1397 getPointerTy(), Callee, PICLabel); 1398 } else { 1399 unsigned OpFlags = 0; 1400 // On ELF targets for PIC code, direct calls should go through the PLT 1401 if (Subtarget->isTargetELF() && 1402 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1403 OpFlags = ARMII::MO_PLT; 1404 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1405 } 1406 } 1407 1408 // FIXME: handle tail calls differently. 1409 unsigned CallOpc; 1410 if (Subtarget->isThumb()) { 1411 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1412 CallOpc = ARMISD::CALL_NOLINK; 1413 else 1414 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1415 } else { 1416 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1417 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1418 : ARMISD::CALL_NOLINK; 1419 } 1420 1421 std::vector<SDValue> Ops; 1422 Ops.push_back(Chain); 1423 Ops.push_back(Callee); 1424 1425 // Add argument registers to the end of the list so that they are known live 1426 // into the call. 1427 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1428 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1429 RegsToPass[i].second.getValueType())); 1430 1431 if (InFlag.getNode()) 1432 Ops.push_back(InFlag); 1433 1434 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1435 if (isTailCall) 1436 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1437 1438 // Returns a chain and a flag for retval copy to use. 
1439   Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
1440   InFlag = Chain.getValue(1);
1441
1442   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1443                              DAG.getIntPtrConstant(0, true), InFlag);
1444   if (!Ins.empty())
1445     InFlag = Chain.getValue(1);
1446
1447   // Handle result values, copying them out of physregs into vregs that we
1448   // return.
1449   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
1450                          dl, DAG, InVals);
1451 }
1452
1453 /// MatchingStackOffset - Return true if the given stack call argument is
1454 /// already available in the same relative position in the caller's
1455 /// incoming argument stack.
1456 static
1457 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
1458                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
1459                          const ARMInstrInfo *TII) {
1460   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
1461   int FI = INT_MAX;
1462   if (Arg.getOpcode() == ISD::CopyFromReg) {
1463     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
1464     if (!TargetRegisterInfo::isVirtualRegister(VR))
1465       return false;
1466     MachineInstr *Def = MRI->getVRegDef(VR);
1467     if (!Def)
1468       return false;
1469     if (!Flags.isByVal()) {
1470       if (!TII->isLoadFromStackSlot(Def, FI))
1471         return false;
1472     } else {
1473       return false;
1474     }
1475   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1476     if (Flags.isByVal())
1477       // ByVal argument is passed in as a pointer but it's now being
1478       // dereferenced. e.g.
1479       // define @foo(%struct.X* %A) {
1480       //   tail call @bar(%struct.X* byval %A)
1481       // }
1482       return false;
1483     SDValue Ptr = Ld->getBasePtr();
1484     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
1485     if (!FINode)
1486       return false;
1487     FI = FINode->getIndex();
1488   } else
1489     return false;
1490
1491   assert(FI != INT_MAX);
1492   if (!MFI->isFixedObjectIndex(FI))
1493     return false;
1494   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
1495 }
1496
1497 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1498 /// for tail call optimization. Targets which want to do tail call
1499 /// optimization should implement this function.
1500 bool
1501 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1502                                                      CallingConv::ID CalleeCC,
1503                                                      bool isVarArg,
1504                                                      bool isCalleeStructRet,
1505                                                      bool isCallerStructRet,
1506                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
1507                                     const SmallVectorImpl<SDValue> &OutVals,
1508                                     const SmallVectorImpl<ISD::InputArg> &Ins,
1509                                                      SelectionDAG& DAG) const {
1510   const Function *CallerF = DAG.getMachineFunction().getFunction();
1511   CallingConv::ID CallerCC = CallerF->getCallingConv();
1512   bool CCMatch = CallerCC == CalleeCC;
1513
1514   // Look for obvious safe cases to perform tail call optimization that do not
1515   // require ABI changes. This is what gcc calls sibcall.
1516
1517   // Do not sibcall optimize vararg calls unless the call site passes no
1518   // arguments.
1519   if (isVarArg && !Outs.empty())
1520     return false;
1521
1522   // Also avoid sibcall optimization if either caller or callee uses struct
1523   // return semantics.
1524   if (isCalleeStructRet || isCallerStructRet)
1525     return false;
1526
1527   // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
1528   // emitEpilogue is not ready for them.
1529   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
1530   // LR. This means if we need to reload LR, it takes an extra instruction,
1531   // which outweighs the value of the tail call; but here we don't know yet
1532   // whether LR is going to be used. Probably the right approach is to
1533   // generate the tail call here and turn it back into CALL/RET in
1534   // emitEpilogue if LR is used.
1535
1536   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1537   // but we need to make sure there are enough registers; the only valid
1538   // registers are the 4 used for parameters. We don't currently do this
1539   // case.
1540   if (Subtarget->isThumb1Only())
1541     return false;
1542
1543   // If the calling conventions do not match, then we'd better make sure the
1544   // results are returned in the same way the caller expects.
1545   if (!CCMatch) {
1546     SmallVector<CCValAssign, 16> RVLocs1;
1547     CCState CCInfo1(CalleeCC, false, getTargetMachine(),
1548                     RVLocs1, *DAG.getContext());
1549     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1550
1551     SmallVector<CCValAssign, 16> RVLocs2;
1552     CCState CCInfo2(CallerCC, false, getTargetMachine(),
1553                     RVLocs2, *DAG.getContext());
1554     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1555
1556     if (RVLocs1.size() != RVLocs2.size())
1557       return false;
1558     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1559       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1560         return false;
1561       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1562         return false;
1563       if (RVLocs1[i].isRegLoc()) {
1564         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1565           return false;
1566       } else {
1567         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1568           return false;
1569       }
1570     }
1571   }
1572
1573   // If the callee takes no arguments then go on to check the results of the
1574   // call.
1575   if (!Outs.empty()) {
1576     // Check if stack adjustment is needed. For now, do not do this if any
1577     // argument is passed on the stack.
1578     SmallVector<CCValAssign, 16> ArgLocs;
1579     CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
1580                    ArgLocs, *DAG.getContext());
1581     CCInfo.AnalyzeCallOperands(Outs,
1582                                CCAssignFnForNode(CalleeCC, false, isVarArg));
1583     if (CCInfo.getNextStackOffset()) {
1584       MachineFunction &MF = DAG.getMachineFunction();
1585
1586       // Check if the arguments are already laid out in the right way as
1587       // the caller's fixed stack objects.
1588       MachineFrameInfo *MFI = MF.getFrameInfo();
1589       const MachineRegisterInfo *MRI = &MF.getRegInfo();
1590       const ARMInstrInfo *TII =
1591         ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
1592       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1593            i != e;
1594            ++i, ++realArgIdx) {
1595         CCValAssign &VA = ArgLocs[i];
1596         EVT RegVT = VA.getLocVT();
1597         SDValue Arg = OutVals[realArgIdx];
1598         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1599         if (VA.getLocInfo() == CCValAssign::Indirect)
1600           return false;
1601         if (VA.needsCustom()) {
1602           // f64 and vector types are split into multiple registers or
1603           // register/stack-slot combinations. The types will not match
1604           // the registers; give up on memory f64 refs until we figure
1605           // out what to do about this.
1606           if (!VA.isRegLoc())
1607             return false;
1608           if (!ArgLocs[++i].isRegLoc())
1609             return false;
1610           if (RegVT == MVT::v2f64) {
1611             if (!ArgLocs[++i].isRegLoc())
1612               return false;
1613             if (!ArgLocs[++i].isRegLoc())
1614               return false;
1615           }
1616         } else if (!VA.isRegLoc()) {
1617           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
1618                                    MFI, MRI, TII))
1619             return false;
1620         }
1621       }
1622     }
1623   }
1624
1625   return true;
1626 }
1627
1628 SDValue
1629 ARMTargetLowering::LowerReturn(SDValue Chain,
1630                                CallingConv::ID CallConv, bool isVarArg,
1631                                const SmallVectorImpl<ISD::OutputArg> &Outs,
1632                                const SmallVectorImpl<SDValue> &OutVals,
1633                                DebugLoc dl, SelectionDAG &DAG) const {
1634
1635   // CCValAssign - represents the assignment of the return value to a location.
1636   SmallVector<CCValAssign, 16> RVLocs;
1637
1638   // CCState - Info about the registers and stack slots.
1639   CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
1640                  *DAG.getContext());
1641
1642   // Analyze outgoing return values.
1643   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
1644                                                isVarArg));
1645
1646   // If this is the first return lowered for this function, add
1647   // the regs to the liveout set for the function.
1648   if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
1649     for (unsigned i = 0; i != RVLocs.size(); ++i)
1650       if (RVLocs[i].isRegLoc())
1651         DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
1652   }
1653
1654   SDValue Flag;
1655
1656   // Copy the result values into the output registers.
1657   for (unsigned i = 0, realRVLocIdx = 0;
1658        i != RVLocs.size();
1659        ++i, ++realRVLocIdx) {
1660     CCValAssign &VA = RVLocs[i];
1661     assert(VA.isRegLoc() && "Can only return in registers!");
1662
1663     SDValue Arg = OutVals[realRVLocIdx];
1664
1665     switch (VA.getLocInfo()) {
1666     default: llvm_unreachable("Unknown loc info!");
1667     case CCValAssign::Full: break;
1668     case CCValAssign::BCvt:
1669       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1670       break;
1671     }
1672
1673     if (VA.needsCustom()) {
1674       if (VA.getLocVT() == MVT::v2f64) {
1675         // Extract the first half and return it in two registers.
1676         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1677                                    DAG.getConstant(0, MVT::i32));
1678         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
1679                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
1680
1681         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
1682         Flag = Chain.getValue(1);
1683         VA = RVLocs[++i]; // skip ahead to next loc
1684         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
1685                                  HalfGPRs.getValue(1), Flag);
1686         Flag = Chain.getValue(1);
1687         VA = RVLocs[++i]; // skip ahead to next loc
1688
1689         // Extract the 2nd half and fall through to handle it as an f64 value.
1690         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1691                           DAG.getConstant(1, MVT::i32));
1692       }
1693       // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
1694       // available.
1695       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1696                                   DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
1697       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
1698       Flag = Chain.getValue(1);
1699       VA = RVLocs[++i]; // skip ahead to next loc
1700       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
1701                                Flag);
1702     } else
1703       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1704
1705     // Guarantee that all emitted copies are glued together in sequence, so
1706     // that nothing else can be scheduled between them.
1707     Flag = Chain.getValue(1);
1708   }
1709
1710   SDValue result;
1711   if (Flag.getNode())
1712     result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
1713   else // Return Void
1714     result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
1715
1716   return result;
1717 }
1718
1719 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
1720   if (N->getNumValues() != 1)
1721     return false;
1722   if (!N->hasNUsesOfValue(1, 0))
1723     return false;
1724
1725   unsigned NumCopies = 0;
1726   SDNode* Copies[2];
1727   SDNode *Use = *N->use_begin();
1728   if (Use->getOpcode() == ISD::CopyToReg) {
1729     Copies[NumCopies++] = Use;
1730   } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
1731     // f64 returned in a pair of GPRs.
1732     for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
1733          UI != UE; ++UI) {
1734       if (UI->getOpcode() != ISD::CopyToReg)
1735         return false;
1736       Copies[UI.getUse().getResNo()] = *UI;
1737       ++NumCopies;
1738     }
1739   } else if (Use->getOpcode() == ISD::BITCAST) {
1740     // f32 returned in a single GPR.
1741     if (!Use->hasNUsesOfValue(1, 0))
1742       return false;
1743     Use = *Use->use_begin();
1744     if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
1745       return false;
1746     Copies[NumCopies++] = Use;
1747   } else {
1748     return false;
1749   }
1750
1751   if (NumCopies != 1 && NumCopies != 2)
1752     return false;
1753
1754   bool HasRet = false;
1755   for (unsigned i = 0; i < NumCopies; ++i) {
1756     SDNode *Copy = Copies[i];
1757     for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
1758          UI != UE; ++UI) {
1759       if (UI->getOpcode() == ISD::CopyToReg) {
1760         SDNode *Use = *UI;
1761         if (Use == Copies[0] || Use == Copies[1])
1762           continue;
1763         return false;
1764       }
1765       if (UI->getOpcode() != ARMISD::RET_FLAG)
1766         return false;
1767       HasRet = true;
1768     }
1769   }
1770
1771   return HasRet;
1772 }
1773
1774 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
1775 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
1776 // one of the above-mentioned nodes. It has to be wrapped because otherwise
1777 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
1778 // be used to form an addressing mode. These wrapped nodes will be selected
1779 // into MOVi.
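// As a rough illustration (a sketch, not code from this revision): lowering
// i32 (ConstantPool @CPI0_0) produces
//   (ARMISD::Wrapper (TargetConstantPool @CPI0_0, 4))
// and instruction selection can then fold the wrapped node into a pc-relative
// constant-pool load or an immediate move, depending on the subtarget.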
1780static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1781 EVT PtrVT = Op.getValueType(); 1782 // FIXME there is no actual debug info here 1783 DebugLoc dl = Op.getDebugLoc(); 1784 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1785 SDValue Res; 1786 if (CP->isMachineConstantPoolEntry()) 1787 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1788 CP->getAlignment()); 1789 else 1790 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1791 CP->getAlignment()); 1792 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1793} 1794 1795unsigned ARMTargetLowering::getJumpTableEncoding() const { 1796 return MachineJumpTableInfo::EK_Inline; 1797} 1798 1799SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1800 SelectionDAG &DAG) const { 1801 MachineFunction &MF = DAG.getMachineFunction(); 1802 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1803 unsigned ARMPCLabelIndex = 0; 1804 DebugLoc DL = Op.getDebugLoc(); 1805 EVT PtrVT = getPointerTy(); 1806 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1807 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1808 SDValue CPAddr; 1809 if (RelocM == Reloc::Static) { 1810 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1811 } else { 1812 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 1813 ARMPCLabelIndex = AFI->createPICLabelUId(); 1814 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1815 ARMCP::CPBlockAddress, 1816 PCAdj); 1817 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1818 } 1819 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1820 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1821 MachinePointerInfo::getConstantPool(), 1822 false, false, 0); 1823 if (RelocM == Reloc::Static) 1824 return Result; 1825 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1826 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1827} 1828 1829// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1830SDValue 1831ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1832 SelectionDAG &DAG) const { 1833 DebugLoc dl = GA->getDebugLoc(); 1834 EVT PtrVT = getPointerTy(); 1835 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1836 MachineFunction &MF = DAG.getMachineFunction(); 1837 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1838 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1839 ARMConstantPoolValue *CPV = 1840 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1841 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1842 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1843 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1844 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1845 MachinePointerInfo::getConstantPool(), 1846 false, false, 0); 1847 SDValue Chain = Argument.getValue(1); 1848 1849 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1850 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1851 1852 // call __tls_get_addr. 1853 ArgListTy Args; 1854 ArgListEntry Entry; 1855 Entry.Node = Argument; 1856 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1857 Args.push_back(Entry); 1858 // FIXME: is there useful debug info available here? 
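// Net effect of the sequence above, informally: the TLSGD constant-pool entry
// plus the pc-relative label compute the argument address, which is passed to
// the __tls_get_addr libcall issued below; the call's result (in r0 under the
// C calling convention) is the address of the thread-local variable.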
1859 std::pair<SDValue, SDValue> CallResult = 1860 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1861 false, false, false, false, 1862 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1863 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1864 return CallResult.first; 1865} 1866 1867// Lower ISD::GlobalTLSAddress using the "initial exec" or 1868// "local exec" model. 1869SDValue 1870ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1871 SelectionDAG &DAG) const { 1872 const GlobalValue *GV = GA->getGlobal(); 1873 DebugLoc dl = GA->getDebugLoc(); 1874 SDValue Offset; 1875 SDValue Chain = DAG.getEntryNode(); 1876 EVT PtrVT = getPointerTy(); 1877 // Get the Thread Pointer 1878 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1879 1880 if (GV->isDeclaration()) { 1881 MachineFunction &MF = DAG.getMachineFunction(); 1882 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1883 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1884 // Initial exec model. 1885 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1886 ARMConstantPoolValue *CPV = 1887 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1888 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1889 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1890 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1891 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1892 MachinePointerInfo::getConstantPool(), 1893 false, false, 0); 1894 Chain = Offset.getValue(1); 1895 1896 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1897 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1898 1899 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1900 MachinePointerInfo::getConstantPool(), 1901 false, false, 0); 1902 } else { 1903 // local exec model 1904 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 1905 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1906 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1907 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1908 MachinePointerInfo::getConstantPool(), 1909 false, false, 0); 1910 } 1911 1912 // The address of the thread local variable is the add of the thread 1913 // pointer with the offset of the variable. 1914 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1915} 1916 1917SDValue 1918ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1919 // TODO: implement the "local dynamic" model 1920 assert(Subtarget->isTargetELF() && 1921 "TLS not implemented for non-ELF targets"); 1922 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1923 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1924 // otherwise use the "Local Exec" TLS Model 1925 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1926 return LowerToTLSGeneralDynamicModel(GA, DAG); 1927 else 1928 return LowerToTLSExecModels(GA, DAG); 1929} 1930 1931SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1932 SelectionDAG &DAG) const { 1933 EVT PtrVT = getPointerTy(); 1934 DebugLoc dl = Op.getDebugLoc(); 1935 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1936 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1937 if (RelocM == Reloc::PIC_) { 1938 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1939 ARMConstantPoolValue *CPV = 1940 new ARMConstantPoolValue(GV, UseGOTOFF ? 
ARMCP::GOTOFF : ARMCP::GOT); 1941 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1942 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1943 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1944 CPAddr, 1945 MachinePointerInfo::getConstantPool(), 1946 false, false, 0); 1947 SDValue Chain = Result.getValue(1); 1948 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1949 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1950 if (!UseGOTOFF) 1951 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1952 MachinePointerInfo::getGOT(), false, false, 0); 1953 return Result; 1954 } 1955 1956 // If we have T2 ops, we can materialize the address directly via movt/movw 1957 // pair. This is always cheaper. 1958 if (Subtarget->useMovt()) { 1959 ++NumMovwMovt; 1960 // FIXME: Once remat is capable of dealing with instructions with register 1961 // operands, expand this into two nodes. 1962 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1963 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1964 } else { 1965 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1966 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1967 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1968 MachinePointerInfo::getConstantPool(), 1969 false, false, 0); 1970 } 1971} 1972 1973SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1974 SelectionDAG &DAG) const { 1975 EVT PtrVT = getPointerTy(); 1976 DebugLoc dl = Op.getDebugLoc(); 1977 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1978 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1979 MachineFunction &MF = DAG.getMachineFunction(); 1980 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1981 1982 if (Subtarget->useMovt()) { 1983 ++NumMovwMovt; 1984 // FIXME: Once remat is capable of dealing with instructions with register 1985 // operands, expand this into two nodes. 1986 if (RelocM != Reloc::PIC_) 1987 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1988 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1989 1990 // FIXME: Not a constant pool! 1991 unsigned PICLabelIndex = AFI->createPICLabelUId(); 1992 SDValue PICLabel = DAG.getConstant(PICLabelIndex, MVT::i32); 1993 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, 1994 DAG.getTargetGlobalAddress(GV, dl, PtrVT), 1995 PICLabel); 1996 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1997 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1998 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 1999 MachinePointerInfo::getGOT(), false, false, 0); 2000 return Result; 2001 } 2002 2003 unsigned ARMPCLabelIndex = 0; 2004 SDValue CPAddr; 2005 if (RelocM == Reloc::Static) { 2006 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2007 } else { 2008 ARMPCLabelIndex = AFI->createPICLabelUId(); 2009 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 
0 : (Subtarget->isThumb()?4:8); 2010 ARMConstantPoolValue *CPV = 2011 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 2012 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2013 } 2014 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2015 2016 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2017 MachinePointerInfo::getConstantPool(), 2018 false, false, 0); 2019 SDValue Chain = Result.getValue(1); 2020 2021 if (RelocM == Reloc::PIC_) { 2022 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2023 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2024 } 2025 2026 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2027 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2028 false, false, 0); 2029 2030 return Result; 2031} 2032 2033SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2034 SelectionDAG &DAG) const { 2035 assert(Subtarget->isTargetELF() && 2036 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2037 MachineFunction &MF = DAG.getMachineFunction(); 2038 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2039 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2040 EVT PtrVT = getPointerTy(); 2041 DebugLoc dl = Op.getDebugLoc(); 2042 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2043 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2044 "_GLOBAL_OFFSET_TABLE_", 2045 ARMPCLabelIndex, PCAdj); 2046 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2047 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2048 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2049 MachinePointerInfo::getConstantPool(), 2050 false, false, 0); 2051 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2052 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2053} 2054 2055SDValue 2056ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2057 const { 2058 DebugLoc dl = Op.getDebugLoc(); 2059 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2060 Op.getOperand(0), Op.getOperand(1)); 2061} 2062 2063SDValue 2064ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2065 DebugLoc dl = Op.getDebugLoc(); 2066 SDValue Val = DAG.getConstant(0, MVT::i32); 2067 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2068 Op.getOperand(1), Val); 2069} 2070 2071SDValue 2072ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2073 DebugLoc dl = Op.getDebugLoc(); 2074 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2075 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2076} 2077 2078SDValue 2079ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2080 const ARMSubtarget *Subtarget) const { 2081 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2082 DebugLoc dl = Op.getDebugLoc(); 2083 switch (IntNo) { 2084 default: return SDValue(); // Don't custom lower most intrinsics. 
2085   case Intrinsic::arm_thread_pointer: {
2086     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2087     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2088   }
2089   case Intrinsic::eh_sjlj_lsda: {
2090     MachineFunction &MF = DAG.getMachineFunction();
2091     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2092     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2093     EVT PtrVT = getPointerTy();
2094     DebugLoc dl = Op.getDebugLoc();
2095     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2096     SDValue CPAddr;
2097     unsigned PCAdj = (RelocM != Reloc::PIC_)
2098       ? 0 : (Subtarget->isThumb() ? 4 : 8);
2099     ARMConstantPoolValue *CPV =
2100       new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
2101                                ARMCP::CPLSDA, PCAdj);
2102     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2103     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2104     SDValue Result =
2105       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2106                   MachinePointerInfo::getConstantPool(),
2107                   false, false, 0);
2108
2109     if (RelocM == Reloc::PIC_) {
2110       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2111       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2112     }
2113     return Result;
2114   }
2115   }
2116 }
2117
2118 static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
2119                                const ARMSubtarget *Subtarget) {
2120   DebugLoc dl = Op.getDebugLoc();
2121   if (!Subtarget->hasDataBarrier()) {
2122     // Some ARMv6 CPUs can support data barriers with an mcr instruction.
2123     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
2124     // here.
2125     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2126            "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
2127     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2128                        DAG.getConstant(0, MVT::i32));
2129   }
2130
2131   SDValue Op5 = Op.getOperand(5);
2132   bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
2133   unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2134   unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2135   bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
2136
2137   ARM_MB::MemBOpt DMBOpt;
2138   if (isDeviceBarrier)
2139     DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
2140   else
2141     DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
2142   return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2143                      DAG.getConstant(DMBOpt, MVT::i32));
2144 }
2145
2146 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2147                              const ARMSubtarget *Subtarget) {
2148   // ARM pre v5TE and Thumb1 do not have preload instructions.
2149   if (!(Subtarget->isThumb2() ||
2150         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2151     // Just preserve the chain.
2152     return Op.getOperand(0);
2153
2154   DebugLoc dl = Op.getDebugLoc();
2155   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2156   if (!isRead &&
2157       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2158     // ARMv7 with MP extension has PLDW.
2159     return Op.getOperand(0);
2160
2161   if (Subtarget->isThumb())
2162     // Invert the bits.
2163     isRead = ~isRead & 1;
2164   unsigned isData = Subtarget->isThumb() ? 0 : 1;
2165
2166   // Currently there is no intrinsic that matches pli.
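// Rough mapping of the two operands below (a sketch, not normative): in ARM
// mode isRead distinguishes PLD (read) from PLDW (write, ARMv7 with the MP
// extension only), and isData would distinguish a data preload from an
// instruction preload; the Thumb2 patterns expect these bits encoded
// differently, hence the bit flipping above.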
2167 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2168 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2169 DAG.getConstant(isData, MVT::i32)); 2170} 2171 2172static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2173 MachineFunction &MF = DAG.getMachineFunction(); 2174 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2175 2176 // vastart just stores the address of the VarArgsFrameIndex slot into the 2177 // memory location argument. 2178 DebugLoc dl = Op.getDebugLoc(); 2179 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2180 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2181 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2182 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2183 MachinePointerInfo(SV), false, false, 0); 2184} 2185 2186SDValue 2187ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2188 SDValue &Root, SelectionDAG &DAG, 2189 DebugLoc dl) const { 2190 MachineFunction &MF = DAG.getMachineFunction(); 2191 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2192 2193 TargetRegisterClass *RC; 2194 if (AFI->isThumb1OnlyFunction()) 2195 RC = ARM::tGPRRegisterClass; 2196 else 2197 RC = ARM::GPRRegisterClass; 2198 2199 // Transform the arguments stored in physical registers into virtual ones. 2200 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2201 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2202 2203 SDValue ArgValue2; 2204 if (NextVA.isMemLoc()) { 2205 MachineFrameInfo *MFI = MF.getFrameInfo(); 2206 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2207 2208 // Create load node to retrieve arguments from the stack. 2209 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2210 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2211 MachinePointerInfo::getFixedStack(FI), 2212 false, false, 0); 2213 } else { 2214 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2215 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2216 } 2217 2218 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2219} 2220 2221SDValue 2222ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2223 CallingConv::ID CallConv, bool isVarArg, 2224 const SmallVectorImpl<ISD::InputArg> 2225 &Ins, 2226 DebugLoc dl, SelectionDAG &DAG, 2227 SmallVectorImpl<SDValue> &InVals) 2228 const { 2229 2230 MachineFunction &MF = DAG.getMachineFunction(); 2231 MachineFrameInfo *MFI = MF.getFrameInfo(); 2232 2233 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2234 2235 // Assign locations to all of the incoming arguments. 2236 SmallVector<CCValAssign, 16> ArgLocs; 2237 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2238 *DAG.getContext()); 2239 CCInfo.AnalyzeFormalArguments(Ins, 2240 CCAssignFnForNode(CallConv, /* Return*/ false, 2241 isVarArg)); 2242 2243 SmallVector<SDValue, 16> ArgValues; 2244 2245 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2246 CCValAssign &VA = ArgLocs[i]; 2247 2248 // Arguments stored in registers. 2249 if (VA.isRegLoc()) { 2250 EVT RegVT = VA.getLocVT(); 2251 2252 SDValue ArgValue; 2253 if (VA.needsCustom()) { 2254 // f64 and vector types are split up into multiple registers or 2255 // combinations of registers and stack slots. 
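// For example (a sketch): under the AAPCS an f64 formal argument may arrive
// as a full GPR pair (both halves via CopyFromReg, rebuilt with VMOVDRR), or
// split across the last available GPR and a 4-byte fixed stack object, which
// is the NextVA.isMemLoc() path in GetF64FormalArgument above.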
2256 if (VA.getLocVT() == MVT::v2f64) { 2257 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2258 Chain, DAG, dl); 2259 VA = ArgLocs[++i]; // skip ahead to next loc 2260 SDValue ArgValue2; 2261 if (VA.isMemLoc()) { 2262 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2263 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2264 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2265 MachinePointerInfo::getFixedStack(FI), 2266 false, false, 0); 2267 } else { 2268 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2269 Chain, DAG, dl); 2270 } 2271 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2272 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2273 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2274 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2275 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2276 } else 2277 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2278 2279 } else { 2280 TargetRegisterClass *RC; 2281 2282 if (RegVT == MVT::f32) 2283 RC = ARM::SPRRegisterClass; 2284 else if (RegVT == MVT::f64) 2285 RC = ARM::DPRRegisterClass; 2286 else if (RegVT == MVT::v2f64) 2287 RC = ARM::QPRRegisterClass; 2288 else if (RegVT == MVT::i32) 2289 RC = (AFI->isThumb1OnlyFunction() ? 2290 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2291 else 2292 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2293 2294 // Transform the arguments in physical registers into virtual ones. 2295 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2296 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2297 } 2298 2299 // If this is an 8 or 16-bit value, it is really passed promoted 2300 // to 32 bits. Insert an assert[sz]ext to capture this, then 2301 // truncate to the right size. 2302 switch (VA.getLocInfo()) { 2303 default: llvm_unreachable("Unknown loc info!"); 2304 case CCValAssign::Full: break; 2305 case CCValAssign::BCvt: 2306 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2307 break; 2308 case CCValAssign::SExt: 2309 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2310 DAG.getValueType(VA.getValVT())); 2311 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2312 break; 2313 case CCValAssign::ZExt: 2314 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2315 DAG.getValueType(VA.getValVT())); 2316 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2317 break; 2318 } 2319 2320 InVals.push_back(ArgValue); 2321 2322 } else { // VA.isRegLoc() 2323 2324 // sanity check 2325 assert(VA.isMemLoc()); 2326 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2327 2328 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; 2329 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true); 2330 2331 // Create load nodes to retrieve arguments from the stack. 
2332     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2333     InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2334                                  MachinePointerInfo::getFixedStack(FI),
2335                                  false, false, 0));
2336     }
2337   }
2338
2339   // varargs
2340   if (isVarArg) {
2341     static const unsigned GPRArgRegs[] = {
2342       ARM::R0, ARM::R1, ARM::R2, ARM::R3
2343     };
2344
2345     unsigned NumGPRs = CCInfo.getFirstUnallocated
2346       (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2347
2348     unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2349     unsigned VARegSize = (4 - NumGPRs) * 4;
2350     unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2351     unsigned ArgOffset = CCInfo.getNextStackOffset();
2352     if (VARegSaveSize) {
2353       // If this function is vararg, store any remaining integer argument regs
2354       // to their spots on the stack so that they may be loaded by dereferencing
2355       // the result of va_next.
2356       AFI->setVarArgsRegSaveSize(VARegSaveSize);
2357       AFI->setVarArgsFrameIndex(
2358         MFI->CreateFixedObject(VARegSaveSize,
2359                                ArgOffset + VARegSaveSize - VARegSize,
2360                                false));
2361       SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2362                                       getPointerTy());
2363
2364       SmallVector<SDValue, 4> MemOps;
2365       for (; NumGPRs < 4; ++NumGPRs) {
2366         TargetRegisterClass *RC;
2367         if (AFI->isThumb1OnlyFunction())
2368           RC = ARM::tGPRRegisterClass;
2369         else
2370           RC = ARM::GPRRegisterClass;
2371
2372         unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
2373         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2374         SDValue Store =
2375           DAG.getStore(Val.getValue(1), dl, Val, FIN,
2376                MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2377                        false, false, 0);
2378         MemOps.push_back(Store);
2379         FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2380                           DAG.getConstant(4, getPointerTy()));
2381       }
2382       if (!MemOps.empty())
2383         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2384                             &MemOps[0], MemOps.size());
2385     } else
2386       // This will point to the next argument passed via stack.
2387       AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2388   }
2389
2390   return Chain;
2391 }
2392
2393 /// isFloatingPointZero - Return true if this is +0.0.
2394 static bool isFloatingPointZero(SDValue Op) {
2395   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2396     return CFP->getValueAPF().isPosZero();
2397   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2398     // Maybe this has already been legalized into the constant pool?
2399     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2400       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2401       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2402         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2403           return CFP->getValueAPF().isPosZero();
2404     }
2405   }
2406   return false;
2407 }
2408
2409 /// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
2410 /// the given operands.
2411 SDValue
2412 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2413                              SDValue &ARMcc, SelectionDAG &DAG,
2414                              DebugLoc dl) const {
2415   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2416     unsigned C = RHSC->getZExtValue();
2417     if (!isLegalICmpImmediate(C)) {
2418       // Constant does not fit, try adjusting it by one?
2419       switch (CC) {
2420       default: break;
2421       case ISD::SETLT:
2422       case ISD::SETGE:
2423         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2424           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2425           RHS = DAG.getConstant(C-1, MVT::i32);
2426         }
2427         break;
2428       case ISD::SETULT:
2429       case ISD::SETUGE:
2430         if (C != 0 && isLegalICmpImmediate(C-1)) {
2431           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2432           RHS = DAG.getConstant(C-1, MVT::i32);
2433         }
2434         break;
2435       case ISD::SETLE:
2436       case ISD::SETGT:
2437         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2438           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2439           RHS = DAG.getConstant(C+1, MVT::i32);
2440         }
2441         break;
2442       case ISD::SETULE:
2443       case ISD::SETUGT:
2444         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2445           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2446           RHS = DAG.getConstant(C+1, MVT::i32);
2447         }
2448         break;
2449       }
2450     }
2451   }
2452
2453   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2454   ARMISD::NodeType CompareType;
2455   switch (CondCode) {
2456   default:
2457     CompareType = ARMISD::CMP;
2458     break;
2459   case ARMCC::EQ:
2460   case ARMCC::NE:
2461     // Uses only Z Flag
2462     CompareType = ARMISD::CMPZ;
2463     break;
2464   }
2465   ARMcc = DAG.getConstant(CondCode, MVT::i32);
2466   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
2467 }
2468
2469 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2470 SDValue
2471 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
2472                              DebugLoc dl) const {
2473   SDValue Cmp;
2474   if (!isFloatingPointZero(RHS))
2475     Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
2476   else
2477     Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
2478   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
2479 }
2480
2481 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2482   SDValue Cond = Op.getOperand(0);
2483   SDValue SelectTrue = Op.getOperand(1);
2484   SDValue SelectFalse = Op.getOperand(2);
2485   DebugLoc dl = Op.getDebugLoc();
2486
2487   // Convert:
2488   //
2489   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
2490   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
2491   //
2492   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
2493     const ConstantSDNode *CMOVTrue =
2494       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
2495     const ConstantSDNode *CMOVFalse =
2496       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2497
2498     if (CMOVTrue && CMOVFalse) {
2499       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
2500       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
2501
2502       SDValue True;
2503       SDValue False;
2504       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
2505         True = SelectTrue;
2506         False = SelectFalse;
2507       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
2508         True = SelectFalse;
2509         False = SelectTrue;
2510       }
2511
2512       if (True.getNode() && False.getNode()) {
2513         EVT VT = Cond.getValueType();
2514         SDValue ARMcc = Cond.getOperand(2);
2515         SDValue CCR = Cond.getOperand(3);
2516         SDValue Cmp = Cond.getOperand(4);
2517         return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
2518       }
2519     }
2520   }
2521
2522   return DAG.getSelectCC(dl, Cond,
2523                          DAG.getConstant(0, Cond.getValueType()),
2524                          SelectTrue, SelectFalse, ISD::SETNE);
2525 }
2526
2527 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2528   EVT VT = Op.getValueType();
2529   SDValue LHS = Op.getOperand(0);
2530   SDValue RHS = Op.getOperand(1);
2531   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2532   SDValue TrueVal = Op.getOperand(2);
2533   SDValue FalseVal = Op.getOperand(3);
2534   DebugLoc dl = Op.getDebugLoc();
2535
2536   if
(LHS.getValueType() == MVT::i32) { 2537 SDValue ARMcc; 2538 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2539 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2540 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2541 } 2542 2543 ARMCC::CondCodes CondCode, CondCode2; 2544 FPCCToARMCC(CC, CondCode, CondCode2); 2545 2546 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2547 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2548 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2549 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2550 ARMcc, CCR, Cmp); 2551 if (CondCode2 != ARMCC::AL) { 2552 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2553 // FIXME: Needs another CMP because flag can have but one use. 2554 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2555 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2556 Result, TrueVal, ARMcc2, CCR, Cmp2); 2557 } 2558 return Result; 2559} 2560 2561/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2562/// to morph to an integer compare sequence. 2563static bool canChangeToInt(SDValue Op, bool &SeenZero, 2564 const ARMSubtarget *Subtarget) { 2565 SDNode *N = Op.getNode(); 2566 if (!N->hasOneUse()) 2567 // Otherwise it requires moving the value from fp to integer registers. 2568 return false; 2569 if (!N->getNumValues()) 2570 return false; 2571 EVT VT = Op.getValueType(); 2572 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2573 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2574 // vmrs are very slow, e.g. cortex-a8. 2575 return false; 2576 2577 if (isFloatingPointZero(Op)) { 2578 SeenZero = true; 2579 return true; 2580 } 2581 return ISD::isNormalLoad(N); 2582} 2583 2584static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2585 if (isFloatingPointZero(Op)) 2586 return DAG.getConstant(0, MVT::i32); 2587 2588 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2589 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2590 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2591 Ld->isVolatile(), Ld->isNonTemporal(), 2592 Ld->getAlignment()); 2593 2594 llvm_unreachable("Unknown VFP cmp argument!"); 2595} 2596 2597static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2598 SDValue &RetVal1, SDValue &RetVal2) { 2599 if (isFloatingPointZero(Op)) { 2600 RetVal1 = DAG.getConstant(0, MVT::i32); 2601 RetVal2 = DAG.getConstant(0, MVT::i32); 2602 return; 2603 } 2604 2605 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2606 SDValue Ptr = Ld->getBasePtr(); 2607 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2608 Ld->getChain(), Ptr, 2609 Ld->getPointerInfo(), 2610 Ld->isVolatile(), Ld->isNonTemporal(), 2611 Ld->getAlignment()); 2612 2613 EVT PtrType = Ptr.getValueType(); 2614 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2615 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2616 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2617 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2618 Ld->getChain(), NewPtr, 2619 Ld->getPointerInfo().getWithOffset(4), 2620 Ld->isVolatile(), Ld->isNonTemporal(), 2621 NewAlign); 2622 return; 2623 } 2624 2625 llvm_unreachable("Unknown VFP cmp argument!"); 2626} 2627 2628/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2629/// f32 and even f64 comparisons to integer ones. 
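// A sketch of the rewrite this performs (it requires -enable-unsafe-fp-math):
//   brcond (setoeq f32 %a, f32 %b), %bb
// becomes an integer cmp/branch on the raw bits when both operands are loads
// or +0.0, e.g. reloading the f32 values as i32 and avoiding vcmpe + vmrs.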
2630 SDValue
2631 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2632   SDValue Chain = Op.getOperand(0);
2633   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2634   SDValue LHS = Op.getOperand(2);
2635   SDValue RHS = Op.getOperand(3);
2636   SDValue Dest = Op.getOperand(4);
2637   DebugLoc dl = Op.getDebugLoc();
2638
2639   bool SeenZero = false;
2640   if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2641       canChangeToInt(RHS, SeenZero, Subtarget) &&
2642       // If one of the operands is zero, it's safe to ignore the NaN case since
2643       // we only care about equality comparisons.
2644       (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2645     // If unsafe fp math optimization is enabled and there are no other uses of
2646     // the CMP operands, and the condition code is EQ or NE, we can optimize it
2647     // to an integer comparison.
2648     if (CC == ISD::SETOEQ)
2649       CC = ISD::SETEQ;
2650     else if (CC == ISD::SETUNE)
2651       CC = ISD::SETNE;
2652
2653     SDValue ARMcc;
2654     if (LHS.getValueType() == MVT::f32) {
2655       LHS = bitcastf32Toi32(LHS, DAG);
2656       RHS = bitcastf32Toi32(RHS, DAG);
2657       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2658       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2659       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2660                          Chain, Dest, ARMcc, CCR, Cmp);
2661     }
2662
2663     SDValue LHS1, LHS2;
2664     SDValue RHS1, RHS2;
2665     expandf64Toi32(LHS, DAG, LHS1, LHS2);
2666     expandf64Toi32(RHS, DAG, RHS1, RHS2);
2667     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2668     ARMcc = DAG.getConstant(CondCode, MVT::i32);
2669     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2670     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
2671     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
2672   }
2673
2674   return SDValue();
2675 }
2676
2677 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2678   SDValue Chain = Op.getOperand(0);
2679   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2680   SDValue LHS = Op.getOperand(2);
2681   SDValue RHS = Op.getOperand(3);
2682   SDValue Dest = Op.getOperand(4);
2683   DebugLoc dl = Op.getDebugLoc();
2684
2685   if (LHS.getValueType() == MVT::i32) {
2686     SDValue ARMcc;
2687     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
2688     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2689     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
2690                        Chain, Dest, ARMcc, CCR, Cmp);
2691   }
2692
2693   assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
2694
2695   if (UnsafeFPMath &&
2696       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
2697        CC == ISD::SETNE || CC == ISD::SETUNE)) {
2698     SDValue Result = OptimizeVFPBrcond(Op, DAG);
2699     if (Result.getNode())
2700       return Result;
2701   }
2702
2703   ARMCC::CondCodes CondCode, CondCode2;
2704   FPCCToARMCC(CC, CondCode, CondCode2);
2705
2706   SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
2707   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
2708   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
2709   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
2710   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
2711   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2712   if (CondCode2 != ARMCC::AL) {
2713     ARMcc = DAG.getConstant(CondCode2, MVT::i32);
2714     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
2715     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
2716   }
2717   return Res;
2718 }
2719
2720 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
2721   SDValue
Chain = Op.getOperand(0); 2722 SDValue Table = Op.getOperand(1); 2723 SDValue Index = Op.getOperand(2); 2724 DebugLoc dl = Op.getDebugLoc(); 2725 2726 EVT PTy = getPointerTy(); 2727 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2728 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2729 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2730 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2731 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2732 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2733 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2734 if (Subtarget->isThumb2()) { 2735 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2736 // which does another jump to the destination. This also makes it easier 2737 // to translate it to TBB / TBH later. 2738 // FIXME: This might not work if the function is extremely large. 2739 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2740 Addr, Op.getOperand(2), JTI, UId); 2741 } 2742 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2743 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2744 MachinePointerInfo::getJumpTable(), 2745 false, false, 0); 2746 Chain = Addr.getValue(1); 2747 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2748 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2749 } else { 2750 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2751 MachinePointerInfo::getJumpTable(), false, false, 0); 2752 Chain = Addr.getValue(1); 2753 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2754 } 2755} 2756 2757static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2758 DebugLoc dl = Op.getDebugLoc(); 2759 unsigned Opc; 2760 2761 switch (Op.getOpcode()) { 2762 default: 2763 assert(0 && "Invalid opcode!"); 2764 case ISD::FP_TO_SINT: 2765 Opc = ARMISD::FTOSI; 2766 break; 2767 case ISD::FP_TO_UINT: 2768 Opc = ARMISD::FTOUI; 2769 break; 2770 } 2771 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2772 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2773} 2774 2775static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2776 EVT VT = Op.getValueType(); 2777 DebugLoc dl = Op.getDebugLoc(); 2778 unsigned Opc; 2779 2780 switch (Op.getOpcode()) { 2781 default: 2782 assert(0 && "Invalid opcode!"); 2783 case ISD::SINT_TO_FP: 2784 Opc = ARMISD::SITOF; 2785 break; 2786 case ISD::UINT_TO_FP: 2787 Opc = ARMISD::UITOF; 2788 break; 2789 } 2790 2791 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 2792 return DAG.getNode(Opc, dl, VT, Op); 2793} 2794 2795SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2796 // Implement fcopysign with a fabs and a conditional fneg. 
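// In other words (an informal sketch): Result = fabs(Tmp0), then negate
// Result when Tmp1 compares less-than against +0.0; the VFP compare sets
// CPSR via FMSTAT and CNEG performs the predicated negation.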
2797 SDValue Tmp0 = Op.getOperand(0); 2798 SDValue Tmp1 = Op.getOperand(1); 2799 DebugLoc dl = Op.getDebugLoc(); 2800 EVT VT = Op.getValueType(); 2801 EVT SrcVT = Tmp1.getValueType(); 2802 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2803 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2804 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2805 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2806 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2807 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2808} 2809 2810SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2811 MachineFunction &MF = DAG.getMachineFunction(); 2812 MachineFrameInfo *MFI = MF.getFrameInfo(); 2813 MFI->setReturnAddressIsTaken(true); 2814 2815 EVT VT = Op.getValueType(); 2816 DebugLoc dl = Op.getDebugLoc(); 2817 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2818 if (Depth) { 2819 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2820 SDValue Offset = DAG.getConstant(4, MVT::i32); 2821 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2822 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2823 MachinePointerInfo(), false, false, 0); 2824 } 2825 2826 // Return LR, which contains the return address. Mark it an implicit live-in. 2827 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2828 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2829} 2830 2831SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2832 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2833 MFI->setFrameAddressIsTaken(true); 2834 2835 EVT VT = Op.getValueType(); 2836 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2837 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2838 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2839 ? ARM::R7 : ARM::R11; 2840 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2841 while (Depth--) 2842 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2843 MachinePointerInfo(), 2844 false, false, 0); 2845 return FrameAddr; 2846} 2847 2848/// ExpandBITCAST - If the target supports VFP, this function is called to 2849/// expand a bit convert where either the source or destination type is i64 to 2850/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2851/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2852/// vectors), since the legalizer won't know what to do with that. 2853static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 2854 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2855 DebugLoc dl = N->getDebugLoc(); 2856 SDValue Op = N->getOperand(0); 2857 2858 // This function is only supposed to be called for i64 types, either as the 2859 // source or destination of the bit convert. 2860 EVT SrcVT = Op.getValueType(); 2861 EVT DstVT = N->getValueType(0); 2862 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2863 "ExpandBITCAST called for non-i64 type"); 2864 2865 // Turn i64->f64 into VMOVDRR. 2866 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2867 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2868 DAG.getConstant(0, MVT::i32)); 2869 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2870 DAG.getConstant(1, MVT::i32)); 2871 return DAG.getNode(ISD::BITCAST, dl, DstVT, 2872 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2873 } 2874 2875 // Turn f64->i64 into VMOVRRD. 
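// (The inverse of the VMOVDRR case above; roughly, the result is
//  (build_pair (VMOVRRD f64):0, (VMOVRRD f64):1),
//  splitting the D register into two GPR-sized halves.)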
2876 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2877 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2878 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2879 // Merge the pieces into a single i64 value. 2880 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2881 } 2882 2883 return SDValue(); 2884} 2885 2886/// getZeroVector - Returns a vector of specified type with all zero elements. 2887/// Zero vectors are used to represent vector negation and in those cases 2888/// will be implemented with the NEON VNEG instruction. However, VNEG does 2889/// not support i64 elements, so sometimes the zero vectors will need to be 2890/// explicitly constructed. Regardless, use a canonical VMOV to create the 2891/// zero vector. 2892static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2893 assert(VT.isVector() && "Expected a vector type"); 2894 // The canonical modified immediate encoding of a zero vector is....0! 2895 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2896 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 2897 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2898 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 2899} 2900 2901/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two 2902/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2903SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2904 SelectionDAG &DAG) const { 2905 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2906 EVT VT = Op.getValueType(); 2907 unsigned VTBits = VT.getSizeInBits(); 2908 DebugLoc dl = Op.getDebugLoc(); 2909 SDValue ShOpLo = Op.getOperand(0); 2910 SDValue ShOpHi = Op.getOperand(1); 2911 SDValue ShAmt = Op.getOperand(2); 2912 SDValue ARMcc; 2913 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2914 2915 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2916 2917 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2918 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2919 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2920 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2921 DAG.getConstant(VTBits, MVT::i32)); 2922 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2923 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2924 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2925 2926 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2927 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2928 ARMcc, DAG, dl); 2929 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2930 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2931 CCR, Cmp); 2932 2933 SDValue Ops[2] = { Lo, Hi }; 2934 return DAG.getMergeValues(Ops, 2, dl); 2935} 2936 2937/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2938/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
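/// For a 64-bit shift (Hi,Lo) << n, the high result word is
/// (Hi << n) | (Lo >> (32 - n)) when n < 32, and Lo << (n - 32) otherwise;
/// the code below computes both candidates and picks one with a CMOV on the
/// sign of n - 32.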
2939SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2940 SelectionDAG &DAG) const { 2941 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2942 EVT VT = Op.getValueType(); 2943 unsigned VTBits = VT.getSizeInBits(); 2944 DebugLoc dl = Op.getDebugLoc(); 2945 SDValue ShOpLo = Op.getOperand(0); 2946 SDValue ShOpHi = Op.getOperand(1); 2947 SDValue ShAmt = Op.getOperand(2); 2948 SDValue ARMcc; 2949 2950 assert(Op.getOpcode() == ISD::SHL_PARTS); 2951 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2952 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2953 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2954 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2955 DAG.getConstant(VTBits, MVT::i32)); 2956 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2957 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2958 2959 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2960 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2961 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2962 ARMcc, DAG, dl); 2963 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2964 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2965 CCR, Cmp); 2966 2967 SDValue Ops[2] = { Lo, Hi }; 2968 return DAG.getMergeValues(Ops, 2, dl); 2969} 2970 2971SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2972 SelectionDAG &DAG) const { 2973 // The rounding mode is in bits 23:22 of the FPSCR. 2974 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 2975 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3) 2976 // so that the shift and the AND get folded into a bitfield extract. 2977 DebugLoc dl = Op.getDebugLoc(); 2978 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 2979 DAG.getConstant(Intrinsic::arm_get_fpscr, 2980 MVT::i32)); 2981 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 2982 DAG.getConstant(1U << 22, MVT::i32)); 2983 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 2984 DAG.getConstant(22, MVT::i32)); 2985 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 2986 DAG.getConstant(3, MVT::i32)); 2987} 2988 2989static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 2990 const ARMSubtarget *ST) { 2991 EVT VT = N->getValueType(0); 2992 DebugLoc dl = N->getDebugLoc(); 2993 2994 if (!ST->hasV6T2Ops()) 2995 return SDValue(); 2996 2997 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 2998 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 2999} 3000 3001static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3002 const ARMSubtarget *ST) { 3003 EVT VT = N->getValueType(0); 3004 DebugLoc dl = N->getDebugLoc(); 3005 3006 if (!VT.isVector()) 3007 return SDValue(); 3008 3009 // Lower vector shifts on NEON to use VSHL. 3010 assert(ST->hasNEON() && "unexpected vector shift"); 3011 3012 // Left shifts translate directly to the vshiftu intrinsic. 3013 if (N->getOpcode() == ISD::SHL) 3014 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3015 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3016 N->getOperand(0), N->getOperand(1)); 3017 3018 assert((N->getOpcode() == ISD::SRA || 3019 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3020 3021 // NEON uses the same intrinsics for both left and right shifts. For 3022 // right shifts, the shift amounts are negative, so negate the vector of 3023 // shift amounts.
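  // For example, an SRA of a v4i32 by <3,3,3,3> becomes a vshifts intrinsic
  // whose shift-amount operand is the negated vector <-3,-3,-3,-3>.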
3024 EVT ShiftVT = N->getOperand(1).getValueType(); 3025 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3026 getZeroVector(ShiftVT, DAG, dl), 3027 N->getOperand(1)); 3028 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3029 Intrinsic::arm_neon_vshifts : 3030 Intrinsic::arm_neon_vshiftu); 3031 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3032 DAG.getConstant(vshiftInt, MVT::i32), 3033 N->getOperand(0), NegatedCount); 3034} 3035 3036static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3037 const ARMSubtarget *ST) { 3038 EVT VT = N->getValueType(0); 3039 DebugLoc dl = N->getDebugLoc(); 3040 3041 // We can get here for a node like i32 = ISD::SHL i32, i64 3042 if (VT != MVT::i64) 3043 return SDValue(); 3044 3045 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3046 "Unknown shift to lower!"); 3047 3048 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3049 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3050 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3051 return SDValue(); 3052 3053 // If we are in thumb mode, we don't have RRX. 3054 if (ST->isThumb1Only()) return SDValue(); 3055 3056 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3057 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3058 DAG.getConstant(0, MVT::i32)); 3059 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3060 DAG.getConstant(1, MVT::i32)); 3061 3062 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3063 // captures the result into a carry flag. 3064 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3065 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3066 3067 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3068 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3069 3070 // Merge the pieces into a single i64 value. 3071 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3072} 3073 3074static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3075 SDValue TmpOp0, TmpOp1; 3076 bool Invert = false; 3077 bool Swap = false; 3078 unsigned Opc = 0; 3079 3080 SDValue Op0 = Op.getOperand(0); 3081 SDValue Op1 = Op.getOperand(1); 3082 SDValue CC = Op.getOperand(2); 3083 EVT VT = Op.getValueType(); 3084 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3085 DebugLoc dl = Op.getDebugLoc(); 3086 3087 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3088 switch (SetCCOpcode) { 3089 default: llvm_unreachable("Illegal FP comparison"); break; 3090 case ISD::SETUNE: 3091 case ISD::SETNE: Invert = true; // Fallthrough 3092 case ISD::SETOEQ: 3093 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3094 case ISD::SETOLT: 3095 case ISD::SETLT: Swap = true; // Fallthrough 3096 case ISD::SETOGT: 3097 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3098 case ISD::SETOLE: 3099 case ISD::SETLE: Swap = true; // Fallthrough 3100 case ISD::SETOGE: 3101 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3102 case ISD::SETUGE: Swap = true; // Fallthrough 3103 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3104 case ISD::SETUGT: Swap = true; // Fallthrough 3105 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3106 case ISD::SETUEQ: Invert = true; // Fallthrough 3107 case ISD::SETONE: 3108 // Expand this to (OLT | OGT). 
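      // SETONE has no direct NEON comparison, so build it from two VCGTs:
      // (a < b) | (a > b) holds exactly when the operands are ordered and
      // unequal. SETUEQ computes the same value and inverts it afterwards.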
3109 TmpOp0 = Op0; 3110 TmpOp1 = Op1; 3111 Opc = ISD::OR; 3112 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3113 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3114 break; 3115 case ISD::SETUO: Invert = true; // Fallthrough 3116 case ISD::SETO: 3117 // Expand this to (OLT | OGE). 3118 TmpOp0 = Op0; 3119 TmpOp1 = Op1; 3120 Opc = ISD::OR; 3121 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3122 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3123 break; 3124 } 3125 } else { 3126 // Integer comparisons. 3127 switch (SetCCOpcode) { 3128 default: llvm_unreachable("Illegal integer comparison"); break; 3129 case ISD::SETNE: Invert = true; 3130 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3131 case ISD::SETLT: Swap = true; 3132 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3133 case ISD::SETLE: Swap = true; 3134 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3135 case ISD::SETULT: Swap = true; 3136 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3137 case ISD::SETULE: Swap = true; 3138 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3139 } 3140 3141 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3142 if (Opc == ARMISD::VCEQ) { 3143 3144 SDValue AndOp; 3145 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3146 AndOp = Op0; 3147 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3148 AndOp = Op1; 3149 3150 // Ignore bitconvert. 3151 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3152 AndOp = AndOp.getOperand(0); 3153 3154 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3155 Opc = ARMISD::VTST; 3156 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3157 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3158 Invert = !Invert; 3159 } 3160 } 3161 } 3162 3163 if (Swap) 3164 std::swap(Op0, Op1); 3165 3166 // If one of the operands is a constant vector zero, attempt to fold the 3167 // comparison to a specialized compare-against-zero form. 3168 SDValue SingleOp; 3169 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3170 SingleOp = Op0; 3171 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3172 if (Opc == ARMISD::VCGE) 3173 Opc = ARMISD::VCLEZ; 3174 else if (Opc == ARMISD::VCGT) 3175 Opc = ARMISD::VCLTZ; 3176 SingleOp = Op1; 3177 } 3178 3179 SDValue Result; 3180 if (SingleOp.getNode()) { 3181 switch (Opc) { 3182 case ARMISD::VCEQ: 3183 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3184 case ARMISD::VCGE: 3185 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3186 case ARMISD::VCLEZ: 3187 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3188 case ARMISD::VCGT: 3189 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3190 case ARMISD::VCLTZ: 3191 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3192 default: 3193 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3194 } 3195 } else { 3196 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3197 } 3198 3199 if (Invert) 3200 Result = DAG.getNOT(dl, Result, VT); 3201 3202 return Result; 3203} 3204 3205/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3206/// valid vector constant for a NEON instruction with a "modified immediate" 3207/// operand (e.g., VMOV). If so, return the encoded value. 
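/// On success, the returned constant packs the Op/Cmode bits together with
/// the 8-bit immediate in the form expected by ARM_AM::createNEONModImm, and
/// VT is set to the vector type that the chosen encoding applies to.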
3208 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3209 unsigned SplatBitSize, SelectionDAG &DAG, 3210 EVT &VT, bool is128Bits, NEONModImmType type) { 3211 unsigned OpCmode, Imm; 3212 3213 // SplatBitSize is set to the smallest size that splats the vector, so a 3214 // zero vector will always have SplatBitSize == 8. However, NEON modified 3215 // immediate instructions other than VMOV do not support the 8-bit encoding 3216 // of a zero vector, and the default encoding of zero is supposed to be the 3217 // 32-bit version. 3218 if (SplatBits == 0) 3219 SplatBitSize = 32; 3220 3221 switch (SplatBitSize) { 3222 case 8: 3223 if (type != VMOVModImm) 3224 return SDValue(); 3225 // Any 1-byte value is OK. Op=0, Cmode=1110. 3226 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3227 OpCmode = 0xe; 3228 Imm = SplatBits; 3229 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3230 break; 3231 3232 case 16: 3233 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3234 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3235 if ((SplatBits & ~0xff) == 0) { 3236 // Value = 0x00nn: Op=x, Cmode=100x. 3237 OpCmode = 0x8; 3238 Imm = SplatBits; 3239 break; 3240 } 3241 if ((SplatBits & ~0xff00) == 0) { 3242 // Value = 0xnn00: Op=x, Cmode=101x. 3243 OpCmode = 0xa; 3244 Imm = SplatBits >> 8; 3245 break; 3246 } 3247 return SDValue(); 3248 3249 case 32: 3250 // NEON's 32-bit VMOV supports splat values where: 3251 // * only one byte is nonzero, or 3252 // * the least significant byte is 0xff and the second byte is nonzero, or 3253 // * the least significant 2 bytes are 0xff and the third is nonzero. 3254 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3255 if ((SplatBits & ~0xff) == 0) { 3256 // Value = 0x000000nn: Op=x, Cmode=000x. 3257 OpCmode = 0; 3258 Imm = SplatBits; 3259 break; 3260 } 3261 if ((SplatBits & ~0xff00) == 0) { 3262 // Value = 0x0000nn00: Op=x, Cmode=001x. 3263 OpCmode = 0x2; 3264 Imm = SplatBits >> 8; 3265 break; 3266 } 3267 if ((SplatBits & ~0xff0000) == 0) { 3268 // Value = 0x00nn0000: Op=x, Cmode=010x. 3269 OpCmode = 0x4; 3270 Imm = SplatBits >> 16; 3271 break; 3272 } 3273 if ((SplatBits & ~0xff000000) == 0) { 3274 // Value = 0xnn000000: Op=x, Cmode=011x. 3275 OpCmode = 0x6; 3276 Imm = SplatBits >> 24; 3277 break; 3278 } 3279 3280 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3281 if (type == OtherModImm) return SDValue(); 3282 3283 if ((SplatBits & ~0xffff) == 0 && 3284 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3285 // Value = 0x0000nnff: Op=x, Cmode=1100. 3286 OpCmode = 0xc; 3287 Imm = SplatBits >> 8; 3288 SplatBits |= 0xff; 3289 break; 3290 } 3291 3292 if ((SplatBits & ~0xffffff) == 0 && 3293 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3294 // Value = 0x00nnffff: Op=x, Cmode=1101. 3295 OpCmode = 0xd; 3296 Imm = SplatBits >> 16; 3297 SplatBits |= 0xffff; 3298 break; 3299 } 3300 3301 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3302 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3303 // VMOV.I32. A (very) minor optimization would be to replicate the value 3304 // and fall through here to test for a valid 64-bit splat. But, then the 3305 // caller would also need to check and handle the change in size. 3306 return SDValue(); 3307 3308 case 64: { 3309 if (type != VMOVModImm) 3310 return SDValue(); 3311 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
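    // Each of the 8 immediate bits selects one byte of the 64-bit value
    // (bit 0 = least significant byte); e.g., 0x00ff00ff00ff00ff is encoded
    // with Imm = 0x55.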
3312 uint64_t BitMask = 0xff; 3313 uint64_t Val = 0; 3314 unsigned ImmMask = 1; 3315 Imm = 0; 3316 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3317 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3318 Val |= BitMask; 3319 Imm |= ImmMask; 3320 } else if ((SplatBits & BitMask) != 0) { 3321 return SDValue(); 3322 } 3323 BitMask <<= 8; 3324 ImmMask <<= 1; 3325 } 3326 // Op=1, Cmode=1110. 3327 OpCmode = 0x1e; 3328 SplatBits = Val; 3329 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3330 break; 3331 } 3332 3333 default: 3334 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3335 return SDValue(); 3336 } 3337 3338 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3339 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3340} 3341 3342static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3343 bool &ReverseVEXT, unsigned &Imm) { 3344 unsigned NumElts = VT.getVectorNumElements(); 3345 ReverseVEXT = false; 3346 3347 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3348 if (M[0] < 0) 3349 return false; 3350 3351 Imm = M[0]; 3352 3353 // If this is a VEXT shuffle, the immediate value is the index of the first 3354 // element. The other shuffle indices must be the successive elements after 3355 // the first one. 3356 unsigned ExpectedElt = Imm; 3357 for (unsigned i = 1; i < NumElts; ++i) { 3358 // Increment the expected index. If it wraps around, it may still be 3359 // a VEXT but the source vectors must be swapped. 3360 ExpectedElt += 1; 3361 if (ExpectedElt == NumElts * 2) { 3362 ExpectedElt = 0; 3363 ReverseVEXT = true; 3364 } 3365 3366 if (M[i] < 0) continue; // ignore UNDEF indices 3367 if (ExpectedElt != static_cast<unsigned>(M[i])) 3368 return false; 3369 } 3370 3371 // Adjust the index value if the source operands will be swapped. 3372 if (ReverseVEXT) 3373 Imm -= NumElts; 3374 3375 return true; 3376} 3377 3378/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3379/// instruction with the specified blocksize. (The order of the elements 3380/// within each block of the vector is reversed.) 3381static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3382 unsigned BlockSize) { 3383 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3384 "Only possible block sizes for VREV are: 16, 32, 64"); 3385 3386 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3387 if (EltSz == 64) 3388 return false; 3389 3390 unsigned NumElts = VT.getVectorNumElements(); 3391 unsigned BlockElts = M[0] + 1; 3392 // If the first shuffle index is UNDEF, be optimistic. 3393 if (M[0] < 0) 3394 BlockElts = BlockSize / EltSz; 3395 3396 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3397 return false; 3398 3399 for (unsigned i = 0; i < NumElts; ++i) { 3400 if (M[i] < 0) continue; // ignore UNDEF indices 3401 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3402 return false; 3403 } 3404 3405 return true; 3406} 3407 3408static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3409 unsigned &WhichResult) { 3410 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3411 if (EltSz == 64) 3412 return false; 3413 3414 unsigned NumElts = VT.getVectorNumElements(); 3415 WhichResult = (M[0] == 0 ? 
0 : 1); 3416 for (unsigned i = 0; i < NumElts; i += 2) { 3417 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3418 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3419 return false; 3420 } 3421 return true; 3422} 3423 3424/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3425/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3426/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3427static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3428 unsigned &WhichResult) { 3429 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3430 if (EltSz == 64) 3431 return false; 3432 3433 unsigned NumElts = VT.getVectorNumElements(); 3434 WhichResult = (M[0] == 0 ? 0 : 1); 3435 for (unsigned i = 0; i < NumElts; i += 2) { 3436 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3437 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3438 return false; 3439 } 3440 return true; 3441} 3442 3443static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3444 unsigned &WhichResult) { 3445 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3446 if (EltSz == 64) 3447 return false; 3448 3449 unsigned NumElts = VT.getVectorNumElements(); 3450 WhichResult = (M[0] == 0 ? 0 : 1); 3451 for (unsigned i = 0; i != NumElts; ++i) { 3452 if (M[i] < 0) continue; // ignore UNDEF indices 3453 if ((unsigned) M[i] != 2 * i + WhichResult) 3454 return false; 3455 } 3456 3457 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3458 if (VT.is64BitVector() && EltSz == 32) 3459 return false; 3460 3461 return true; 3462} 3463 3464/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3465/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3466/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3467static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3468 unsigned &WhichResult) { 3469 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3470 if (EltSz == 64) 3471 return false; 3472 3473 unsigned Half = VT.getVectorNumElements() / 2; 3474 WhichResult = (M[0] == 0 ? 0 : 1); 3475 for (unsigned j = 0; j != 2; ++j) { 3476 unsigned Idx = WhichResult; 3477 for (unsigned i = 0; i != Half; ++i) { 3478 int MIdx = M[i + j * Half]; 3479 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3480 return false; 3481 Idx += 2; 3482 } 3483 } 3484 3485 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3486 if (VT.is64BitVector() && EltSz == 32) 3487 return false; 3488 3489 return true; 3490} 3491 3492static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3493 unsigned &WhichResult) { 3494 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3495 if (EltSz == 64) 3496 return false; 3497 3498 unsigned NumElts = VT.getVectorNumElements(); 3499 WhichResult = (M[0] == 0 ? 0 : 1); 3500 unsigned Idx = WhichResult * NumElts / 2; 3501 for (unsigned i = 0; i != NumElts; i += 2) { 3502 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3503 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3504 return false; 3505 Idx += 1; 3506 } 3507 3508 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3509 if (VT.is64BitVector() && EltSz == 32) 3510 return false; 3511 3512 return true; 3513} 3514 3515/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3516/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3517/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 
3518static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3519 unsigned &WhichResult) { 3520 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3521 if (EltSz == 64) 3522 return false; 3523 3524 unsigned NumElts = VT.getVectorNumElements(); 3525 WhichResult = (M[0] == 0 ? 0 : 1); 3526 unsigned Idx = WhichResult * NumElts / 2; 3527 for (unsigned i = 0; i != NumElts; i += 2) { 3528 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3529 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3530 return false; 3531 Idx += 1; 3532 } 3533 3534 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3535 if (VT.is64BitVector() && EltSz == 32) 3536 return false; 3537 3538 return true; 3539} 3540 3541// If N is an integer constant that can be moved into a register in one 3542// instruction, return an SDValue of such a constant (will become a MOV 3543// instruction). Otherwise return null. 3544static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3545 const ARMSubtarget *ST, DebugLoc dl) { 3546 uint64_t Val; 3547 if (!isa<ConstantSDNode>(N)) 3548 return SDValue(); 3549 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3550 3551 if (ST->isThumb1Only()) { 3552 if (Val <= 255 || ~Val <= 255) 3553 return DAG.getConstant(Val, MVT::i32); 3554 } else { 3555 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3556 return DAG.getConstant(Val, MVT::i32); 3557 } 3558 return SDValue(); 3559} 3560 3561// If this is a case we can't handle, return null and let the default 3562// expansion code take care of it. 3563SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3564 const ARMSubtarget *ST) const { 3565 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3566 DebugLoc dl = Op.getDebugLoc(); 3567 EVT VT = Op.getValueType(); 3568 3569 APInt SplatBits, SplatUndef; 3570 unsigned SplatBitSize; 3571 bool HasAnyUndefs; 3572 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3573 if (SplatBitSize <= 64) { 3574 // Check if an immediate VMOV works. 3575 EVT VmovVT; 3576 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3577 SplatUndef.getZExtValue(), SplatBitSize, 3578 DAG, VmovVT, VT.is128BitVector(), 3579 VMOVModImm); 3580 if (Val.getNode()) { 3581 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3582 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3583 } 3584 3585 // Try an immediate VMVN. 3586 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3587 ((1LL << SplatBitSize) - 1)); 3588 Val = isNEONModifiedImm(NegatedImm, 3589 SplatUndef.getZExtValue(), SplatBitSize, 3590 DAG, VmovVT, VT.is128BitVector(), 3591 VMVNModImm); 3592 if (Val.getNode()) { 3593 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3594 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3595 } 3596 } 3597 } 3598 3599 // Scan through the operands to see if only one value is used. 
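  // A single pass computes three facts used below: whether only element 0 is
  // defined (SCALAR_TO_VECTOR suffices), whether all defined elements are the
  // same value (a VDUP splat may apply), and whether they are all constants.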
3600 unsigned NumElts = VT.getVectorNumElements(); 3601 bool isOnlyLowElement = true; 3602 bool usesOnlyOneValue = true; 3603 bool isConstant = true; 3604 SDValue Value; 3605 for (unsigned i = 0; i < NumElts; ++i) { 3606 SDValue V = Op.getOperand(i); 3607 if (V.getOpcode() == ISD::UNDEF) 3608 continue; 3609 if (i > 0) 3610 isOnlyLowElement = false; 3611 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3612 isConstant = false; 3613 3614 if (!Value.getNode()) 3615 Value = V; 3616 else if (V != Value) 3617 usesOnlyOneValue = false; 3618 } 3619 3620 if (!Value.getNode()) 3621 return DAG.getUNDEF(VT); 3622 3623 if (isOnlyLowElement) 3624 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3625 3626 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3627 3628 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3629 // i32 and try again. 3630 if (usesOnlyOneValue && EltSize <= 32) { 3631 if (!isConstant) 3632 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3633 if (VT.getVectorElementType().isFloatingPoint()) { 3634 SmallVector<SDValue, 8> Ops; 3635 for (unsigned i = 0; i < NumElts; ++i) 3636 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3637 Op.getOperand(i))); 3638 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3639 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3640 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3641 if (Val.getNode()) 3642 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3643 } 3644 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3645 if (Val.getNode()) 3646 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3647 } 3648 3649 // If all elements are constants and the case above didn't get hit, fall back 3650 // to the default expansion, which will generate a load from the constant 3651 // pool. 3652 if (isConstant) 3653 return SDValue(); 3654 3655 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 3656 if (NumElts >= 4) { 3657 SDValue shuffle = ReconstructShuffle(Op, DAG); 3658 if (shuffle != SDValue()) 3659 return shuffle; 3660 } 3661 3662 // Vectors with 32- or 64-bit elements can be built by directly assigning 3663 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3664 // will be legalized. 3665 if (EltSize >= 32) { 3666 // Do the expansion with floating-point types, since that is what the VFP 3667 // registers are defined to use, and since i64 is not legal. 3668 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3669 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3670 SmallVector<SDValue, 8> Ops; 3671 for (unsigned i = 0; i < NumElts; ++i) 3672 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3673 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3674 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3675 } 3676 3677 return SDValue(); 3678} 3679 3680// Gather data to see if the operation can be modelled as a 3681// shuffle in combination with VEXTs. 
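// For example, a v4i16 built from elements 2..5 of a single v8i16 source can
// become a VEXT of the source's two halves with an offset of 2; the code
// below generalizes this to two source vectors.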
3682SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 3683 SelectionDAG &DAG) const { 3684 DebugLoc dl = Op.getDebugLoc(); 3685 EVT VT = Op.getValueType(); 3686 unsigned NumElts = VT.getVectorNumElements(); 3687 3688 SmallVector<SDValue, 2> SourceVecs; 3689 SmallVector<unsigned, 2> MinElts; 3690 SmallVector<unsigned, 2> MaxElts; 3691 3692 for (unsigned i = 0; i < NumElts; ++i) { 3693 SDValue V = Op.getOperand(i); 3694 if (V.getOpcode() == ISD::UNDEF) 3695 continue; 3696 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 3697 // A shuffle can only come from building a vector from various 3698 // elements of other vectors. 3699 return SDValue(); 3700 } 3701 3702 // Record this extraction against the appropriate vector if possible... 3703 SDValue SourceVec = V.getOperand(0); 3704 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 3705 bool FoundSource = false; 3706 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 3707 if (SourceVecs[j] == SourceVec) { 3708 if (MinElts[j] > EltNo) 3709 MinElts[j] = EltNo; 3710 if (MaxElts[j] < EltNo) 3711 MaxElts[j] = EltNo; 3712 FoundSource = true; 3713 break; 3714 } 3715 } 3716 3717 // Or record a new source if not... 3718 if (!FoundSource) { 3719 SourceVecs.push_back(SourceVec); 3720 MinElts.push_back(EltNo); 3721 MaxElts.push_back(EltNo); 3722 } 3723 } 3724 3725 // Currently we only do something sane when at most two source vectors 3726 // are involved. 3727 if (SourceVecs.size() > 2) 3728 return SDValue(); 3729 3730 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 3731 int VEXTOffsets[2] = {0, 0}; 3732 3733 // This loop extracts the usage patterns of the source vectors 3734 // and prepares appropriate SDValues for a shuffle if possible. 3735 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 3736 if (SourceVecs[i].getValueType() == VT) { 3737 // No VEXT necessary 3738 ShuffleSrcs[i] = SourceVecs[i]; 3739 VEXTOffsets[i] = 0; 3740 continue; 3741 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 3742 // It probably isn't worth padding out a smaller vector just to 3743 // break it down again in a shuffle. 3744 return SDValue(); 3745 } 3746 3747 // Since only 64-bit and 128-bit vectors are legal on ARM and 3748 // we've eliminated the other cases...
3749 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 3750 "unexpected vector sizes in ReconstructShuffle"); 3751 3752 if (MaxElts[i] - MinElts[i] >= NumElts) { 3753 // Span too large for a VEXT to cope 3754 return SDValue(); 3755 } 3756 3757 if (MinElts[i] >= NumElts) { 3758 // The extraction can just take the second half 3759 VEXTOffsets[i] = NumElts; 3760 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3761 SourceVecs[i], 3762 DAG.getIntPtrConstant(NumElts)); 3763 } else if (MaxElts[i] < NumElts) { 3764 // The extraction can just take the first half 3765 VEXTOffsets[i] = 0; 3766 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3767 SourceVecs[i], 3768 DAG.getIntPtrConstant(0)); 3769 } else { 3770 // An actual VEXT is needed 3771 VEXTOffsets[i] = MinElts[i]; 3772 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3773 SourceVecs[i], 3774 DAG.getIntPtrConstant(0)); 3775 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3776 SourceVecs[i], 3777 DAG.getIntPtrConstant(NumElts)); 3778 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 3779 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 3780 } 3781 } 3782 3783 SmallVector<int, 8> Mask; 3784 3785 for (unsigned i = 0; i < NumElts; ++i) { 3786 SDValue Entry = Op.getOperand(i); 3787 if (Entry.getOpcode() == ISD::UNDEF) { 3788 Mask.push_back(-1); 3789 continue; 3790 } 3791 3792 SDValue ExtractVec = Entry.getOperand(0); 3793 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 3794 .getOperand(1))->getSExtValue(); 3795 if (ExtractVec == SourceVecs[0]) { 3796 Mask.push_back(ExtractElt - VEXTOffsets[0]); 3797 } else { 3798 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 3799 } 3800 } 3801 3802 // Final check before we try to produce nonsense... 3803 if (isShuffleMaskLegal(Mask, VT)) 3804 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 3805 &Mask[0]); 3806 3807 return SDValue(); 3808} 3809 3810/// isShuffleMaskLegal - Targets can use this to indicate that they only 3811/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3812/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3813/// are assumed to be legal. 3814bool 3815ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3816 EVT VT) const { 3817 if (VT.getVectorNumElements() == 4 && 3818 (VT.is128BitVector() || VT.is64BitVector())) { 3819 unsigned PFIndexes[4]; 3820 for (unsigned i = 0; i != 4; ++i) { 3821 if (M[i] < 0) 3822 PFIndexes[i] = 8; 3823 else 3824 PFIndexes[i] = M[i]; 3825 } 3826 3827 // Compute the index in the perfect shuffle table. 
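    // Each of the four mask entries is one base-9 digit (0-7 for a concrete
    // element, 8 for undef), so the index is ((d0*9 + d1)*9 + d2)*9 + d3.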
3828 unsigned PFTableIndex = 3829 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3830 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3831 unsigned Cost = (PFEntry >> 30); 3832 3833 if (Cost <= 4) 3834 return true; 3835 } 3836 3837 bool ReverseVEXT; 3838 unsigned Imm, WhichResult; 3839 3840 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3841 return (EltSize >= 32 || 3842 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3843 isVREVMask(M, VT, 64) || 3844 isVREVMask(M, VT, 32) || 3845 isVREVMask(M, VT, 16) || 3846 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3847 isVTRNMask(M, VT, WhichResult) || 3848 isVUZPMask(M, VT, WhichResult) || 3849 isVZIPMask(M, VT, WhichResult) || 3850 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3851 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3852 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3853} 3854 3855/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3856/// the specified operations to build the shuffle. 3857static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3858 SDValue RHS, SelectionDAG &DAG, 3859 DebugLoc dl) { 3860 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3861 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3862 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3863 3864 enum { 3865 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3866 OP_VREV, 3867 OP_VDUP0, 3868 OP_VDUP1, 3869 OP_VDUP2, 3870 OP_VDUP3, 3871 OP_VEXT1, 3872 OP_VEXT2, 3873 OP_VEXT3, 3874 OP_VUZPL, // VUZP, left result 3875 OP_VUZPR, // VUZP, right result 3876 OP_VZIPL, // VZIP, left result 3877 OP_VZIPR, // VZIP, right result 3878 OP_VTRNL, // VTRN, left result 3879 OP_VTRNR // VTRN, right result 3880 }; 3881 3882 if (OpNum == OP_COPY) { 3883 if (LHSID == (1*9+2)*9+3) return LHS; 3884 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3885 return RHS; 3886 } 3887 3888 SDValue OpLHS, OpRHS; 3889 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3890 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3891 EVT VT = OpLHS.getValueType(); 3892 3893 switch (OpNum) { 3894 default: llvm_unreachable("Unknown shuffle opcode!"); 3895 case OP_VREV: 3896 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3897 case OP_VDUP0: 3898 case OP_VDUP1: 3899 case OP_VDUP2: 3900 case OP_VDUP3: 3901 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3902 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3903 case OP_VEXT1: 3904 case OP_VEXT2: 3905 case OP_VEXT3: 3906 return DAG.getNode(ARMISD::VEXT, dl, VT, 3907 OpLHS, OpRHS, 3908 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3909 case OP_VUZPL: 3910 case OP_VUZPR: 3911 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3912 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3913 case OP_VZIPL: 3914 case OP_VZIPR: 3915 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3916 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3917 case OP_VTRNL: 3918 case OP_VTRNR: 3919 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3920 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3921 } 3922} 3923 3924static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3925 SDValue V1 = Op.getOperand(0); 3926 SDValue V2 = Op.getOperand(1); 3927 DebugLoc dl = Op.getDebugLoc(); 3928 EVT VT = Op.getValueType(); 3929 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3930 SmallVector<int, 8> ShuffleMask; 3931 3932 // Convert shuffles that are directly supported on NEON to target-specific 3933 // DAG nodes, 
instead of keeping them as shuffles and matching them again 3934 // during code selection. This is more efficient and avoids the possibility 3935 // of inconsistencies between legalization and selection. 3936 // FIXME: floating-point vectors should be canonicalized to integer vectors 3937 // of the same size so that they get CSEd properly. 3938 SVN->getMask(ShuffleMask); 3939 3940 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3941 if (EltSize <= 32) { 3942 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 3943 int Lane = SVN->getSplatIndex(); 3944 // If this is an undef splat, generate it via "just" vdup, if possible. 3945 if (Lane == -1) Lane = 0; 3946 3947 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 3948 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 3949 } 3950 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 3951 DAG.getConstant(Lane, MVT::i32)); 3952 } 3953 3954 bool ReverseVEXT; 3955 unsigned Imm; 3956 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 3957 if (ReverseVEXT) 3958 std::swap(V1, V2); 3959 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 3960 DAG.getConstant(Imm, MVT::i32)); 3961 } 3962 3963 if (isVREVMask(ShuffleMask, VT, 64)) 3964 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 3965 if (isVREVMask(ShuffleMask, VT, 32)) 3966 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 3967 if (isVREVMask(ShuffleMask, VT, 16)) 3968 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 3969 3970 // Check for Neon shuffles that modify both input vectors in place. 3971 // If both results are used, i.e., if there are two shuffles with the same 3972 // source operands and with masks corresponding to both results of one of 3973 // these operations, DAG memoization will ensure that a single node is 3974 // used for both shuffles. 3975 unsigned WhichResult; 3976 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 3977 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3978 V1, V2).getValue(WhichResult); 3979 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 3980 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3981 V1, V2).getValue(WhichResult); 3982 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 3983 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3984 V1, V2).getValue(WhichResult); 3985 3986 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3987 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3988 V1, V1).getValue(WhichResult); 3989 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3990 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3991 V1, V1).getValue(WhichResult); 3992 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3993 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3994 V1, V1).getValue(WhichResult); 3995 } 3996 3997 // If the shuffle is not directly supported and it has 4 elements, use 3998 // the PerfectShuffle-generated table to synthesize it from other shuffles. 3999 unsigned NumElts = VT.getVectorNumElements(); 4000 if (NumElts == 4) { 4001 unsigned PFIndexes[4]; 4002 for (unsigned i = 0; i != 4; ++i) { 4003 if (ShuffleMask[i] < 0) 4004 PFIndexes[i] = 8; 4005 else 4006 PFIndexes[i] = ShuffleMask[i]; 4007 } 4008 4009 // Compute the index in the perfect shuffle table.
4010 unsigned PFTableIndex = 4011 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4012 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4013 unsigned Cost = (PFEntry >> 30); 4014 4015 if (Cost <= 4) 4016 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4017 } 4018 4019 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4020 if (EltSize >= 32) { 4021 // Do the expansion with floating-point types, since that is what the VFP 4022 // registers are defined to use, and since i64 is not legal. 4023 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4024 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4025 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4026 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4027 SmallVector<SDValue, 8> Ops; 4028 for (unsigned i = 0; i < NumElts; ++i) { 4029 if (ShuffleMask[i] < 0) 4030 Ops.push_back(DAG.getUNDEF(EltVT)); 4031 else 4032 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4033 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4034 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4035 MVT::i32))); 4036 } 4037 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4038 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4039 } 4040 4041 return SDValue(); 4042} 4043 4044static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4045 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4046 SDValue Lane = Op.getOperand(1); 4047 if (!isa<ConstantSDNode>(Lane)) 4048 return SDValue(); 4049 4050 SDValue Vec = Op.getOperand(0); 4051 if (Op.getValueType() == MVT::i32 && 4052 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4053 DebugLoc dl = Op.getDebugLoc(); 4054 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4055 } 4056 4057 return Op; 4058} 4059 4060static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4061 // The only time a CONCAT_VECTORS operation can have legal types is when 4062 // two 64-bit vectors are concatenated to a 128-bit vector. 4063 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4064 "unexpected CONCAT_VECTORS"); 4065 DebugLoc dl = Op.getDebugLoc(); 4066 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4067 SDValue Op0 = Op.getOperand(0); 4068 SDValue Op1 = Op.getOperand(1); 4069 if (Op0.getOpcode() != ISD::UNDEF) 4070 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4071 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4072 DAG.getIntPtrConstant(0)); 4073 if (Op1.getOpcode() != ISD::UNDEF) 4074 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4075 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4076 DAG.getIntPtrConstant(1)); 4077 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4078} 4079 4080/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4081/// element has been zero/sign-extended, depending on the isSigned parameter, 4082/// from an integer type half its size. 4083static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4084 bool isSigned) { 4085 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4086 EVT VT = N->getValueType(0); 4087 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4088 SDNode *BVN = N->getOperand(0).getNode(); 4089 if (BVN->getValueType(0) != MVT::v4i32 || 4090 BVN->getOpcode() != ISD::BUILD_VECTOR) 4091 return false; 4092 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4093 unsigned HiElt = 1 - LoElt; 4094 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4095 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4096 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4097 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4098 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4099 return false; 4100 if (isSigned) { 4101 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4102 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4103 return true; 4104 } else { 4105 if (Hi0->isNullValue() && Hi1->isNullValue()) 4106 return true; 4107 } 4108 return false; 4109 } 4110 4111 if (N->getOpcode() != ISD::BUILD_VECTOR) 4112 return false; 4113 4114 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4115 SDNode *Elt = N->getOperand(i).getNode(); 4116 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4117 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4118 unsigned HalfSize = EltSize / 2; 4119 if (isSigned) { 4120 int64_t SExtVal = C->getSExtValue(); 4121 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4122 return false; 4123 } else { 4124 if ((C->getZExtValue() >> HalfSize) != 0) 4125 return false; 4126 } 4127 continue; 4128 } 4129 return false; 4130 } 4131 4132 return true; 4133} 4134 4135/// isSignExtended - Check if a node is a vector value that is sign-extended 4136/// or a constant BUILD_VECTOR with sign-extended elements. 4137static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4138 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4139 return true; 4140 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4141 return true; 4142 return false; 4143} 4144 4145/// isZeroExtended - Check if a node is a vector value that is zero-extended 4146/// or a constant BUILD_VECTOR with zero-extended elements. 4147static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4148 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4149 return true; 4150 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4151 return true; 4152 return false; 4153} 4154 4155/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4156/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4157static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4158 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4159 return N->getOperand(0); 4160 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4161 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4162 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4163 LD->isNonTemporal(), LD->getAlignment()); 4164 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4165 // have been legalized as a BITCAST from v4i32. 4166 if (N->getOpcode() == ISD::BITCAST) { 4167 SDNode *BVN = N->getOperand(0).getNode(); 4168 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4169 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4170 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4171 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4172 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4173 } 4174 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
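  // The truncation is safe: isExtendedBUILD_VECTOR already verified that
  // every constant element fits in (or, in the signed case, sign-extends
  // from) the half-width type.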
4175 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4176 EVT VT = N->getValueType(0); 4177 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4178 unsigned NumElts = VT.getVectorNumElements(); 4179 MVT TruncVT = MVT::getIntegerVT(EltSize); 4180 SmallVector<SDValue, 8> Ops; 4181 for (unsigned i = 0; i != NumElts; ++i) { 4182 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4183 const APInt &CInt = C->getAPIntValue(); 4184 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4185 } 4186 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4187 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4188} 4189 4190static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4191 // Multiplications are only custom-lowered for 128-bit vectors so that 4192 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4193 EVT VT = Op.getValueType(); 4194 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4195 SDNode *N0 = Op.getOperand(0).getNode(); 4196 SDNode *N1 = Op.getOperand(1).getNode(); 4197 unsigned NewOpc = 0; 4198 if (isSignExtended(N0, DAG) && isSignExtended(N1, DAG)) 4199 NewOpc = ARMISD::VMULLs; 4200 else if (isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG)) 4201 NewOpc = ARMISD::VMULLu; 4202 else if (VT == MVT::v2i64) 4203 // Fall through to expand this. It is not legal. 4204 return SDValue(); 4205 else 4206 // Other vector multiplications are legal. 4207 return Op; 4208 4209 // Legalize to a VMULL instruction. 4210 DebugLoc DL = Op.getDebugLoc(); 4211 SDValue Op0 = SkipExtension(N0, DAG); 4212 SDValue Op1 = SkipExtension(N1, DAG); 4213 4214 assert(Op0.getValueType().is64BitVector() && 4215 Op1.getValueType().is64BitVector() && 4216 "unexpected types for extended operands to VMULL"); 4217 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4218} 4219 4220SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4221 switch (Op.getOpcode()) { 4222 default: llvm_unreachable("Don't know how to custom lower this!"); 4223 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4224 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4225 case ISD::GlobalAddress: 4226 return Subtarget->isTargetDarwin() ? 
LowerGlobalAddressDarwin(Op, DAG) : 4227 LowerGlobalAddressELF(Op, DAG); 4228 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4229 case ISD::SELECT: return LowerSELECT(Op, DAG); 4230 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4231 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4232 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4233 case ISD::VASTART: return LowerVASTART(Op, DAG); 4234 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4235 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4236 case ISD::SINT_TO_FP: 4237 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4238 case ISD::FP_TO_SINT: 4239 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4240 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4241 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4242 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4243 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4244 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4245 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4246 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4247 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4248 Subtarget); 4249 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4250 case ISD::SHL: 4251 case ISD::SRL: 4252 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4253 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4254 case ISD::SRL_PARTS: 4255 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4256 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4257 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4258 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4259 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4260 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4261 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4262 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4263 case ISD::MUL: return LowerMUL(Op, DAG); 4264 } 4265 return SDValue(); 4266} 4267 4268/// ReplaceNodeResults - Replace the results of node with an illegal result 4269/// type with new values built out of custom code. 
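/// This is the type-legalization counterpart of LowerOperation above: it is
/// called for nodes whose i64 results are illegal, i.e. the BITCAST and
/// 64-bit SRL/SRA cases handled below.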
4270 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4271 SmallVectorImpl<SDValue>&Results, 4272 SelectionDAG &DAG) const { 4273 SDValue Res; 4274 switch (N->getOpcode()) { 4275 default: 4276 llvm_unreachable("Don't know how to custom expand this!"); 4277 break; 4278 case ISD::BITCAST: 4279 Res = ExpandBITCAST(N, DAG); 4280 break; 4281 case ISD::SRL: 4282 case ISD::SRA: 4283 Res = Expand64BitShift(N, DAG, Subtarget); 4284 break; 4285 } 4286 if (Res.getNode()) 4287 Results.push_back(Res); 4288} 4289 4290//===----------------------------------------------------------------------===// 4291// ARM Scheduler Hooks 4292//===----------------------------------------------------------------------===// 4293 4294MachineBasicBlock * 4295ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 4296 MachineBasicBlock *BB, 4297 unsigned Size) const { 4298 unsigned dest = MI->getOperand(0).getReg(); 4299 unsigned ptr = MI->getOperand(1).getReg(); 4300 unsigned oldval = MI->getOperand(2).getReg(); 4301 unsigned newval = MI->getOperand(3).getReg(); 4302 unsigned scratch = BB->getParent()->getRegInfo() 4303 .createVirtualRegister(ARM::GPRRegisterClass); 4304 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4305 DebugLoc dl = MI->getDebugLoc(); 4306 bool isThumb2 = Subtarget->isThumb2(); 4307 4308 unsigned ldrOpc, strOpc; 4309 switch (Size) { 4310 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4311 case 1: 4312 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4313 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4314 break; 4315 case 2: 4316 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4317 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4318 break; 4319 case 4: 4320 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4321 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4322 break; 4323 } 4324 4325 MachineFunction *MF = BB->getParent(); 4326 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4327 MachineFunction::iterator It = BB; 4328 ++It; // insert the new blocks after the current block 4329 4330 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4331 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4332 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4333 MF->insert(It, loop1MBB); 4334 MF->insert(It, loop2MBB); 4335 MF->insert(It, exitMBB); 4336 4337 // Transfer the remainder of BB and its successor edges to exitMBB. 4338 exitMBB->splice(exitMBB->begin(), BB, 4339 llvm::next(MachineBasicBlock::iterator(MI)), 4340 BB->end()); 4341 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4342 4343 // thisMBB: 4344 // ... 4345 // fallthrough --> loop1MBB 4346 BB->addSuccessor(loop1MBB); 4347 4348 // loop1MBB: 4349 // ldrex dest, [ptr] 4350 // cmp dest, oldval 4351 // bne exitMBB 4352 BB = loop1MBB; 4353 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4354 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4355 .addReg(dest).addReg(oldval)); 4356 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4357 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4358 BB->addSuccessor(loop2MBB); 4359 BB->addSuccessor(exitMBB); 4360 4361 // loop2MBB: 4362 // strex scratch, newval, [ptr] 4363 // cmp scratch, #0 4364 // bne loop1MBB 4365 BB = loop2MBB; 4366 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 4367 .addReg(ptr)); 4368 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2CMPri : ARM::CMPri)) 4369 .addReg(scratch).addImm(0)); 4370 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4371 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4372 BB->addSuccessor(loop1MBB); 4373 BB->addSuccessor(exitMBB); 4374 4375 // exitMBB: 4376 // ... 4377 BB = exitMBB; 4378 4379 MI->eraseFromParent(); // The instruction is gone now. 4380 4381 return BB; 4382} 4383 4384MachineBasicBlock * 4385ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4386 unsigned Size, unsigned BinOpcode) const { 4387 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4388 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4389 4390 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4391 MachineFunction *MF = BB->getParent(); 4392 MachineFunction::iterator It = BB; 4393 ++It; 4394 4395 unsigned dest = MI->getOperand(0).getReg(); 4396 unsigned ptr = MI->getOperand(1).getReg(); 4397 unsigned incr = MI->getOperand(2).getReg(); 4398 DebugLoc dl = MI->getDebugLoc(); 4399 4400 bool isThumb2 = Subtarget->isThumb2(); 4401 unsigned ldrOpc, strOpc; 4402 switch (Size) { 4403 default: llvm_unreachable("unsupported size for AtomicBinary!"); 4404 case 1: 4405 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4406 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4407 break; 4408 case 2: 4409 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4410 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4411 break; 4412 case 4: 4413 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4414 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4415 break; 4416 } 4417 4418 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4419 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4420 MF->insert(It, loopMBB); 4421 MF->insert(It, exitMBB); 4422 4423 // Transfer the remainder of BB and its successor edges to exitMBB. 4424 exitMBB->splice(exitMBB->begin(), BB, 4425 llvm::next(MachineBasicBlock::iterator(MI)), 4426 BB->end()); 4427 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4428 4429 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4430 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4431 unsigned scratch2 = (!BinOpcode) ? incr : 4432 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4433 4434 // thisMBB: 4435 // ... 4436 // fallthrough --> loopMBB 4437 BB->addSuccessor(loopMBB); 4438 4439 // loopMBB: 4440 // ldrex dest, ptr 4441 // <binop> scratch2, dest, incr 4442 // strex scratch, scratch2, ptr 4443 // cmp scratch, #0 4444 // bne loopMBB 4445 // fallthrough --> exitMBB 4446 BB = loopMBB; 4447 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4448 if (BinOpcode) { 4449 // Operand order needs to go the other way for NAND. 4450 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 4451 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4452 addReg(incr).addReg(dest)).addReg(0); 4453 else 4454 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4455 addReg(dest).addReg(incr)).addReg(0); 4456 } 4457 4458 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4459 .addReg(ptr)); 4460 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4461 .addReg(scratch).addImm(0)); 4462 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4463 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4464 4465 BB->addSuccessor(loopMBB); 4466 BB->addSuccessor(exitMBB); 4467 4468 // exitMBB: 4469 // ...
4470 BB = exitMBB; 4471 4472 MI->eraseFromParent(); // The instruction is gone now. 4473 4474 return BB; 4475} 4476 4477static 4478MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4479 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4480 E = MBB->succ_end(); I != E; ++I) 4481 if (*I != Succ) 4482 return *I; 4483 llvm_unreachable("Expecting a BB with two successors!"); 4484} 4485 4486MachineBasicBlock * 4487ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4488 MachineBasicBlock *BB) const { 4489 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4490 DebugLoc dl = MI->getDebugLoc(); 4491 bool isThumb2 = Subtarget->isThumb2(); 4492 switch (MI->getOpcode()) { 4493 default: 4494 MI->dump(); 4495 llvm_unreachable("Unexpected instr type to insert"); 4496 4497 case ARM::ATOMIC_LOAD_ADD_I8: 4498 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4499 case ARM::ATOMIC_LOAD_ADD_I16: 4500 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4501 case ARM::ATOMIC_LOAD_ADD_I32: 4502 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4503 4504 case ARM::ATOMIC_LOAD_AND_I8: 4505 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4506 case ARM::ATOMIC_LOAD_AND_I16: 4507 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4508 case ARM::ATOMIC_LOAD_AND_I32: 4509 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4510 4511 case ARM::ATOMIC_LOAD_OR_I8: 4512 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4513 case ARM::ATOMIC_LOAD_OR_I16: 4514 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4515 case ARM::ATOMIC_LOAD_OR_I32: 4516 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4517 4518 case ARM::ATOMIC_LOAD_XOR_I8: 4519 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4520 case ARM::ATOMIC_LOAD_XOR_I16: 4521 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4522 case ARM::ATOMIC_LOAD_XOR_I32: 4523 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4524 4525 case ARM::ATOMIC_LOAD_NAND_I8: 4526 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4527 case ARM::ATOMIC_LOAD_NAND_I16: 4528 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4529 case ARM::ATOMIC_LOAD_NAND_I32: 4530 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4531 4532 case ARM::ATOMIC_LOAD_SUB_I8: 4533 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4534 case ARM::ATOMIC_LOAD_SUB_I16: 4535 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4536 case ARM::ATOMIC_LOAD_SUB_I32: 4537 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4538 4539 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4540 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4541 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4542 4543 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4544 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4545 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4546 4547 case ARM::tMOVCCr_pseudo: { 4548 // To "insert" a SELECT_CC instruction, we actually have to insert the 4549 // diamond control-flow pattern. 
The incoming instruction knows the 4550 // destination vreg to set, the condition code register to branch on, the 4551 // true/false values to select between, and a branch opcode to use. 4552 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4553 MachineFunction::iterator It = BB; 4554 ++It; 4555 4556 // thisMBB: 4557 // ... 4558 // TrueVal = ... 4559 // cmpTY ccX, r1, r2 4560 // bCC copy1MBB 4561 // fallthrough --> copy0MBB 4562 MachineBasicBlock *thisMBB = BB; 4563 MachineFunction *F = BB->getParent(); 4564 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4565 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4566 F->insert(It, copy0MBB); 4567 F->insert(It, sinkMBB); 4568 4569 // Transfer the remainder of BB and its successor edges to sinkMBB. 4570 sinkMBB->splice(sinkMBB->begin(), BB, 4571 llvm::next(MachineBasicBlock::iterator(MI)), 4572 BB->end()); 4573 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4574 4575 BB->addSuccessor(copy0MBB); 4576 BB->addSuccessor(sinkMBB); 4577 4578 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4579 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4580 4581 // copy0MBB: 4582 // %FalseValue = ... 4583 // # fallthrough to sinkMBB 4584 BB = copy0MBB; 4585 4586 // Update machine-CFG edges 4587 BB->addSuccessor(sinkMBB); 4588 4589 // sinkMBB: 4590 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4591 // ... 4592 BB = sinkMBB; 4593 BuildMI(*BB, BB->begin(), dl, 4594 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4595 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4596 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4597 4598 MI->eraseFromParent(); // The pseudo instruction is gone now. 4599 return BB; 4600 } 4601 4602 case ARM::BCCi64: 4603 case ARM::BCCZi64: { 4604 // If there is an unconditional branch to the other successor, remove it. 4605 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 4606 4607 // Compare both parts that make up the double comparison separately for 4608 // equality. 4609 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4610 4611 unsigned LHS1 = MI->getOperand(1).getReg(); 4612 unsigned LHS2 = MI->getOperand(2).getReg(); 4613 if (RHSisZero) { 4614 AddDefaultPred(BuildMI(BB, dl, 4615 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4616 .addReg(LHS1).addImm(0)); 4617 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4618 .addReg(LHS2).addImm(0) 4619 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4620 } else { 4621 unsigned RHS1 = MI->getOperand(3).getReg(); 4622 unsigned RHS2 = MI->getOperand(4).getReg(); 4623 AddDefaultPred(BuildMI(BB, dl, 4624 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4625 .addReg(LHS1).addReg(RHS1)); 4626 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4627 .addReg(LHS2).addReg(RHS2) 4628 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4629 } 4630 4631 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 4632 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4633 if (MI->getOperand(0).getImm() == ARMCC::NE) 4634 std::swap(destMBB, exitMBB); 4635 4636 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4637 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4638 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4639 .addMBB(exitMBB); 4640 4641 MI->eraseFromParent(); // The pseudo instruction is gone now. 
4642    return BB;
4643  }
4644  }
4645}
4646
4647//===----------------------------------------------------------------------===//
4648// ARM Optimization Hooks
4649//===----------------------------------------------------------------------===//
4650
4651static
4652SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
4653                            TargetLowering::DAGCombinerInfo &DCI) {
4654  SelectionDAG &DAG = DCI.DAG;
4655  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4656  EVT VT = N->getValueType(0);
4657  unsigned Opc = N->getOpcode();
4658  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
4659  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
4660  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
4661  ISD::CondCode CC = ISD::SETCC_INVALID;
4662
4663  if (isSlctCC) {
4664    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
4665  } else {
4666    SDValue CCOp = Slct.getOperand(0);
4667    if (CCOp.getOpcode() == ISD::SETCC)
4668      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
4669  }
4670
4671  bool DoXform = false;
4672  bool InvCC = false;
4673  assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
4674          "Bad input!");
4675
4676  if (LHS.getOpcode() == ISD::Constant &&
4677      cast<ConstantSDNode>(LHS)->isNullValue()) {
4678    DoXform = true;
4679  } else if (CC != ISD::SETCC_INVALID &&
4680             RHS.getOpcode() == ISD::Constant &&
4681             cast<ConstantSDNode>(RHS)->isNullValue()) {
4682    std::swap(LHS, RHS);
4683    SDValue Op0 = Slct.getOperand(0);
4684    EVT OpVT = isSlctCC ? Op0.getValueType() :
4685                          Op0.getOperand(0).getValueType();
4686    bool isInt = OpVT.isInteger();
4687    CC = ISD::getSetCCInverse(CC, isInt);
4688
4689    if (!TLI.isCondCodeLegal(CC, OpVT))
4690      return SDValue();         // Inverse operator isn't legal.
4691
4692    DoXform = true;
4693    InvCC = true;
4694  }
4695
4696  if (DoXform) {
4697    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
4698    if (isSlctCC)
4699      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
4700                             Slct.getOperand(0), Slct.getOperand(1), CC);
4701    SDValue CCOp = Slct.getOperand(0);
4702    if (InvCC)
4703      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
4704                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
4705    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
4706                       CCOp, OtherOp, Result);
4707  }
4708  return SDValue();
4709}
4710
4711/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
4712/// operands N0 and N1. This is a helper for PerformADDCombine that is
4713/// called with the default operands, and if that fails, with commuted
4714/// operands.
4715static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
4716                                         TargetLowering::DAGCombinerInfo &DCI) {
4717  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
4718  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
4719    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
4720    if (Result.getNode()) return Result;
4721  }
4722  return SDValue();
4723}
4724
4725/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
4726///
4727static SDValue PerformADDCombine(SDNode *N,
4728                                 TargetLowering::DAGCombinerInfo &DCI) {
4729  SDValue N0 = N->getOperand(0);
4730  SDValue N1 = N->getOperand(1);
4731
4732  // First try with the default operand order.
4733  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI);
4734  if (Result.getNode())
4735    return Result;
4736
4737  // If that didn't work, try again with the operands commuted.
4738  return PerformADDCombineWithOperands(N, N1, N0, DCI);
4739}
4740
4741/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
4742///
4743static SDValue PerformSUBCombine(SDNode *N,
4744                                 TargetLowering::DAGCombinerInfo &DCI) {
4745  SDValue N0 = N->getOperand(0);
4746  SDValue N1 = N->getOperand(1);
4747
4748  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
4749  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
4750    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
4751    if (Result.getNode()) return Result;
4752  }
4753
4754  return SDValue();
4755}
4756
4757static SDValue PerformMULCombine(SDNode *N,
4758                                 TargetLowering::DAGCombinerInfo &DCI,
4759                                 const ARMSubtarget *Subtarget) {
4760  SelectionDAG &DAG = DCI.DAG;
4761
4762  if (Subtarget->isThumb1Only())
4763    return SDValue();
4764
4765  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
4766    return SDValue();
4767
4768  EVT VT = N->getValueType(0);
4769  if (VT != MVT::i32)
4770    return SDValue();
4771
4772  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
4773  if (!C)
4774    return SDValue();
4775
4776  uint64_t MulAmt = C->getZExtValue();
4777  unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
4778  ShiftAmt = ShiftAmt & (32 - 1);
4779  SDValue V = N->getOperand(0);
4780  DebugLoc DL = N->getDebugLoc();
4781
4782  SDValue Res;
4783  MulAmt >>= ShiftAmt;
4784  if (isPowerOf2_32(MulAmt - 1)) {
4785    // (mul x, 2^N + 1) => (add (shl x, N), x)
4786    Res = DAG.getNode(ISD::ADD, DL, VT,
4787                      V, DAG.getNode(ISD::SHL, DL, VT,
4788                                     V, DAG.getConstant(Log2_32(MulAmt-1),
4789                                                        MVT::i32)));
4790  } else if (isPowerOf2_32(MulAmt + 1)) {
4791    // (mul x, 2^N - 1) => (sub (shl x, N), x)
4792    Res = DAG.getNode(ISD::SUB, DL, VT,
4793                      DAG.getNode(ISD::SHL, DL, VT,
4794                                  V, DAG.getConstant(Log2_32(MulAmt+1),
4795                                                     MVT::i32)),
4796                      V);
4797  } else
4798    return SDValue();
4799
4800  if (ShiftAmt != 0)
4801    Res = DAG.getNode(ISD::SHL, DL, VT, Res,
4802                      DAG.getConstant(ShiftAmt, MVT::i32));
4803
4804  // Do not add new nodes to DAG combiner worklist.
4805  DCI.CombineTo(N, Res, false);
4806  return SDValue();
4807}
4808
4809static SDValue PerformANDCombine(SDNode *N,
4810                                 TargetLowering::DAGCombinerInfo &DCI) {
4811  // Attempt to use immediate-form VBIC
4812  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
4813  DebugLoc dl = N->getDebugLoc();
4814  EVT VT = N->getValueType(0);
4815  SelectionDAG &DAG = DCI.DAG;
4816
4817  APInt SplatBits, SplatUndef;
4818  unsigned SplatBitSize;
4819  bool HasAnyUndefs;
4820  if (BVN &&
4821      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4822    if (SplatBitSize <= 64) {
4823      EVT VbicVT;
4824      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
4825                                      SplatUndef.getZExtValue(), SplatBitSize,
4826                                      DAG, VbicVT, VT.is128BitVector(),
4827                                      OtherModImm);
4828      if (Val.getNode()) {
4829        SDValue Input =
4830          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
4831        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
4832        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
4833      }
4834    }
4835  }
4836
4837  return SDValue();
4838}
4839
4840/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
4841static SDValue PerformORCombine(SDNode *N,
4842                                TargetLowering::DAGCombinerInfo &DCI,
4843                                const ARMSubtarget *Subtarget) {
4844  // Attempt to use immediate-form VORR
4845  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
4846  DebugLoc dl = N->getDebugLoc();
4847  EVT VT = N->getValueType(0);
4848  SelectionDAG &DAG = DCI.DAG;
4849
4850  APInt SplatBits, SplatUndef;
4851  unsigned SplatBitSize;
4852  bool HasAnyUndefs;
4853  if (BVN && Subtarget->hasNEON() &&
4854      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4855    if (SplatBitSize <= 64) {
4856      EVT VorrVT;
4857      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
4858                                      SplatUndef.getZExtValue(), SplatBitSize,
4859                                      DAG, VorrVT, VT.is128BitVector(),
4860                                      OtherModImm);
4861      if (Val.getNode()) {
4862        SDValue Input =
4863          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
4864        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
4865        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
4866      }
4867    }
4868  }
4869
4870  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
4871  // reasonable.
4872
4873  // BFI is only available on V6T2+
4874  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
4875    return SDValue();
4876
4877  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
4878  DebugLoc DL = N->getDebugLoc();
4879  // 1) or (and A, mask), val => ARMbfi A, val, mask
4880  //      iff (val & ~mask) == val
4881  //
4882  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
4883  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
4884  //          && CountPopulation_32(mask) == CountPopulation_32(~mask2)
4885  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
4886  //          && CountPopulation_32(~mask) == CountPopulation_32(mask2)
4887  // (i.e., copy a bitfield value into another bitfield of the same width)
4888  if (N0.getOpcode() != ISD::AND)
4889    return SDValue();
4890
4891  if (VT != MVT::i32)
4892    return SDValue();
4893
4894  SDValue N00 = N0.getOperand(0);
4895
4896  // The value and the mask need to be constants so we can verify this is
4897  // actually a bitfield set. If the mask is 0xffff, we can do better
4898  // via a movt instruction, so don't use BFI in that case.
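  // An illustrative example (not from the original source): with A in r0,
  //   or (and A, 0xffffff00), 0x4a
  // matches case (1) below, since 0xffffff00 is a bitfield-inverted mask and
  // 0x4a fits entirely in its clear bits, so it can lower to roughly:
  //   mov r1, #0x4a
  //   bfi r0, r1, #0, #8
  // i.e. the low byte of A is replaced and the remaining bits are preserved.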
4899 SDValue MaskOp = N0.getOperand(1); 4900 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 4901 if (!MaskC) 4902 return SDValue(); 4903 unsigned Mask = MaskC->getZExtValue(); 4904 if (Mask == 0xffff) 4905 return SDValue(); 4906 SDValue Res; 4907 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 4908 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4909 if (N1C) { 4910 unsigned Val = N1C->getZExtValue(); 4911 if ((Val & ~Mask) != Val) 4912 return SDValue(); 4913 4914 if (ARM::isBitFieldInvertedMask(Mask)) { 4915 Val >>= CountTrailingZeros_32(~Mask); 4916 4917 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 4918 DAG.getConstant(Val, MVT::i32), 4919 DAG.getConstant(Mask, MVT::i32)); 4920 4921 // Do not add new nodes to DAG combiner worklist. 4922 DCI.CombineTo(N, Res, false); 4923 return SDValue(); 4924 } 4925 } else if (N1.getOpcode() == ISD::AND) { 4926 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4927 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4928 if (!N11C) 4929 return SDValue(); 4930 unsigned Mask2 = N11C->getZExtValue(); 4931 4932 if (ARM::isBitFieldInvertedMask(Mask) && 4933 ARM::isBitFieldInvertedMask(~Mask2) && 4934 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) { 4935 // The pack halfword instruction works better for masks that fit it, 4936 // so use that when it's available. 4937 if (Subtarget->hasT2ExtractPack() && 4938 (Mask == 0xffff || Mask == 0xffff0000)) 4939 return SDValue(); 4940 // 2a 4941 unsigned lsb = CountTrailingZeros_32(Mask2); 4942 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 4943 DAG.getConstant(lsb, MVT::i32)); 4944 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 4945 DAG.getConstant(Mask, MVT::i32)); 4946 // Do not add new nodes to DAG combiner worklist. 4947 DCI.CombineTo(N, Res, false); 4948 return SDValue(); 4949 } else if (ARM::isBitFieldInvertedMask(~Mask) && 4950 ARM::isBitFieldInvertedMask(Mask2) && 4951 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) { 4952 // The pack halfword instruction works better for masks that fit it, 4953 // so use that when it's available. 4954 if (Subtarget->hasT2ExtractPack() && 4955 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 4956 return SDValue(); 4957 // 2b 4958 unsigned lsb = CountTrailingZeros_32(Mask); 4959 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 4960 DAG.getConstant(lsb, MVT::i32)); 4961 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 4962 DAG.getConstant(Mask2, MVT::i32)); 4963 // Do not add new nodes to DAG combiner worklist. 4964 DCI.CombineTo(N, Res, false); 4965 return SDValue(); 4966 } 4967 } 4968 4969 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 4970 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 4971 ARM::isBitFieldInvertedMask(~Mask)) { 4972 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 4973 // where lsb(mask) == #shamt and masked bits of B are known zero. 4974 SDValue ShAmt = N00.getOperand(1); 4975 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 4976 unsigned LSB = CountTrailingZeros_32(Mask); 4977 if (ShAmtC != LSB) 4978 return SDValue(); 4979 4980 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 4981 DAG.getConstant(~Mask, MVT::i32)); 4982 4983 // Do not add new nodes to DAG combiner worklist. 4984 DCI.CombineTo(N, Res, false); 4985 } 4986 4987 return SDValue(); 4988} 4989 4990/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 4991/// C1 & C2 == C1. 
4992static SDValue PerformBFICombine(SDNode *N, 4993 TargetLowering::DAGCombinerInfo &DCI) { 4994 SDValue N1 = N->getOperand(1); 4995 if (N1.getOpcode() == ISD::AND) { 4996 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4997 if (!N11C) 4998 return SDValue(); 4999 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 5000 unsigned Mask2 = N11C->getZExtValue(); 5001 if ((Mask & Mask2) == Mask2) 5002 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 5003 N->getOperand(0), N1.getOperand(0), 5004 N->getOperand(2)); 5005 } 5006 return SDValue(); 5007} 5008 5009/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 5010/// ARMISD::VMOVRRD. 5011static SDValue PerformVMOVRRDCombine(SDNode *N, 5012 TargetLowering::DAGCombinerInfo &DCI) { 5013 // vmovrrd(vmovdrr x, y) -> x,y 5014 SDValue InDouble = N->getOperand(0); 5015 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 5016 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 5017 return SDValue(); 5018} 5019 5020/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 5021/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 5022static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 5023 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 5024 SDValue Op0 = N->getOperand(0); 5025 SDValue Op1 = N->getOperand(1); 5026 if (Op0.getOpcode() == ISD::BITCAST) 5027 Op0 = Op0.getOperand(0); 5028 if (Op1.getOpcode() == ISD::BITCAST) 5029 Op1 = Op1.getOperand(0); 5030 if (Op0.getOpcode() == ARMISD::VMOVRRD && 5031 Op0.getNode() == Op1.getNode() && 5032 Op0.getResNo() == 0 && Op1.getResNo() == 1) 5033 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 5034 N->getValueType(0), Op0.getOperand(0)); 5035 return SDValue(); 5036} 5037 5038/// PerformSTORECombine - Target-specific dag combine xforms for 5039/// ISD::STORE. 5040static SDValue PerformSTORECombine(SDNode *N, 5041 TargetLowering::DAGCombinerInfo &DCI) { 5042 // Bitcast an i64 store extracted from a vector to f64. 5043 // Otherwise, the i64 value will be legalized to a pair of i32 values. 5044 StoreSDNode *St = cast<StoreSDNode>(N); 5045 SDValue StVal = St->getValue(); 5046 if (!ISD::isNormalStore(St) || St->isVolatile() || 5047 StVal.getValueType() != MVT::i64 || 5048 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 5049 return SDValue(); 5050 5051 SelectionDAG &DAG = DCI.DAG; 5052 DebugLoc dl = StVal.getDebugLoc(); 5053 SDValue IntVec = StVal.getOperand(0); 5054 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5055 IntVec.getValueType().getVectorNumElements()); 5056 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 5057 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 5058 Vec, StVal.getOperand(1)); 5059 dl = N->getDebugLoc(); 5060 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 5061 // Make the DAGCombiner fold the bitcasts. 5062 DCI.AddToWorklist(Vec.getNode()); 5063 DCI.AddToWorklist(ExtElt.getNode()); 5064 DCI.AddToWorklist(V.getNode()); 5065 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 5066 St->getPointerInfo(), St->isVolatile(), 5067 St->isNonTemporal(), St->getAlignment(), 5068 St->getTBAAInfo()); 5069} 5070 5071/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 5072/// are normal, non-volatile loads. If so, it is profitable to bitcast an 5073/// i64 vector to have f64 elements, since the value can then be loaded 5074/// directly into a VFP register. 
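/// For example (an illustrative case, not part of the original comment), a
/// (build_vector (load i64), (load i64)) can be rewritten to load f64 values
/// and bitcast the result, so each element arrives via a single VLDR rather
/// than a pair of i32 loads glued back together with VMOVDRR.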
5075static bool hasNormalLoadOperand(SDNode *N) { 5076 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 5077 for (unsigned i = 0; i < NumElts; ++i) { 5078 SDNode *Elt = N->getOperand(i).getNode(); 5079 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 5080 return true; 5081 } 5082 return false; 5083} 5084 5085/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 5086/// ISD::BUILD_VECTOR. 5087static SDValue PerformBUILD_VECTORCombine(SDNode *N, 5088 TargetLowering::DAGCombinerInfo &DCI){ 5089 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 5090 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 5091 // into a pair of GPRs, which is fine when the value is used as a scalar, 5092 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 5093 SelectionDAG &DAG = DCI.DAG; 5094 if (N->getNumOperands() == 2) { 5095 SDValue RV = PerformVMOVDRRCombine(N, DAG); 5096 if (RV.getNode()) 5097 return RV; 5098 } 5099 5100 // Load i64 elements as f64 values so that type legalization does not split 5101 // them up into i32 values. 5102 EVT VT = N->getValueType(0); 5103 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 5104 return SDValue(); 5105 DebugLoc dl = N->getDebugLoc(); 5106 SmallVector<SDValue, 8> Ops; 5107 unsigned NumElts = VT.getVectorNumElements(); 5108 for (unsigned i = 0; i < NumElts; ++i) { 5109 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 5110 Ops.push_back(V); 5111 // Make the DAGCombiner fold the bitcast. 5112 DCI.AddToWorklist(V.getNode()); 5113 } 5114 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 5115 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 5116 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 5117} 5118 5119/// PerformInsertEltCombine - Target-specific dag combine xforms for 5120/// ISD::INSERT_VECTOR_ELT. 5121static SDValue PerformInsertEltCombine(SDNode *N, 5122 TargetLowering::DAGCombinerInfo &DCI) { 5123 // Bitcast an i64 load inserted into a vector to f64. 5124 // Otherwise, the i64 value will be legalized to a pair of i32 values. 5125 EVT VT = N->getValueType(0); 5126 SDNode *Elt = N->getOperand(1).getNode(); 5127 if (VT.getVectorElementType() != MVT::i64 || 5128 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 5129 return SDValue(); 5130 5131 SelectionDAG &DAG = DCI.DAG; 5132 DebugLoc dl = N->getDebugLoc(); 5133 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5134 VT.getVectorNumElements()); 5135 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 5136 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 5137 // Make the DAGCombiner fold the bitcasts. 5138 DCI.AddToWorklist(Vec.getNode()); 5139 DCI.AddToWorklist(V.getNode()); 5140 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 5141 Vec, V, N->getOperand(2)); 5142 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 5143} 5144 5145/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 5146/// ISD::VECTOR_SHUFFLE. 5147static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 5148 // The LLVM shufflevector instruction does not require the shuffle mask 5149 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 5150 // have that requirement. 
When translating to ISD::VECTOR_SHUFFLE, if the 5151 // operands do not match the mask length, they are extended by concatenating 5152 // them with undef vectors. That is probably the right thing for other 5153 // targets, but for NEON it is better to concatenate two double-register 5154 // size vector operands into a single quad-register size vector. Do that 5155 // transformation here: 5156 // shuffle(concat(v1, undef), concat(v2, undef)) -> 5157 // shuffle(concat(v1, v2), undef) 5158 SDValue Op0 = N->getOperand(0); 5159 SDValue Op1 = N->getOperand(1); 5160 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 5161 Op1.getOpcode() != ISD::CONCAT_VECTORS || 5162 Op0.getNumOperands() != 2 || 5163 Op1.getNumOperands() != 2) 5164 return SDValue(); 5165 SDValue Concat0Op1 = Op0.getOperand(1); 5166 SDValue Concat1Op1 = Op1.getOperand(1); 5167 if (Concat0Op1.getOpcode() != ISD::UNDEF || 5168 Concat1Op1.getOpcode() != ISD::UNDEF) 5169 return SDValue(); 5170 // Skip the transformation if any of the types are illegal. 5171 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5172 EVT VT = N->getValueType(0); 5173 if (!TLI.isTypeLegal(VT) || 5174 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 5175 !TLI.isTypeLegal(Concat1Op1.getValueType())) 5176 return SDValue(); 5177 5178 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 5179 Op0.getOperand(0), Op1.getOperand(0)); 5180 // Translate the shuffle mask. 5181 SmallVector<int, 16> NewMask; 5182 unsigned NumElts = VT.getVectorNumElements(); 5183 unsigned HalfElts = NumElts/2; 5184 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 5185 for (unsigned n = 0; n < NumElts; ++n) { 5186 int MaskElt = SVN->getMaskElt(n); 5187 int NewElt = -1; 5188 if (MaskElt < (int)HalfElts) 5189 NewElt = MaskElt; 5190 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 5191 NewElt = HalfElts + MaskElt - NumElts; 5192 NewMask.push_back(NewElt); 5193 } 5194 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 5195 DAG.getUNDEF(VT), NewMask.data()); 5196} 5197 5198/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 5199/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 5200/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 5201/// return true. 5202static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 5203 SelectionDAG &DAG = DCI.DAG; 5204 EVT VT = N->getValueType(0); 5205 // vldN-dup instructions only support 64-bit vectors for N > 1. 5206 if (!VT.is64BitVector()) 5207 return false; 5208 5209 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 5210 SDNode *VLD = N->getOperand(0).getNode(); 5211 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 5212 return false; 5213 unsigned NumVecs = 0; 5214 unsigned NewOpc = 0; 5215 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 5216 if (IntNo == Intrinsic::arm_neon_vld2lane) { 5217 NumVecs = 2; 5218 NewOpc = ARMISD::VLD2DUP; 5219 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 5220 NumVecs = 3; 5221 NewOpc = ARMISD::VLD3DUP; 5222 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 5223 NumVecs = 4; 5224 NewOpc = ARMISD::VLD4DUP; 5225 } else { 5226 return false; 5227 } 5228 5229 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 5230 // numbers match the load. 
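  // (Illustrative note, assuming the usual NEON semantics: a vld2lane whose
  // two results each feed a VDUPLANE with the same lane index collapses into
  // a single vld2dup, which loads each element once and replicates it across
  // all lanes of its destination vector.)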
5231 unsigned VLDLaneNo = 5232 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 5233 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5234 UI != UE; ++UI) { 5235 // Ignore uses of the chain result. 5236 if (UI.getUse().getResNo() == NumVecs) 5237 continue; 5238 SDNode *User = *UI; 5239 if (User->getOpcode() != ARMISD::VDUPLANE || 5240 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 5241 return false; 5242 } 5243 5244 // Create the vldN-dup node. 5245 EVT Tys[5]; 5246 unsigned n; 5247 for (n = 0; n < NumVecs; ++n) 5248 Tys[n] = VT; 5249 Tys[n] = MVT::Other; 5250 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 5251 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 5252 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 5253 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 5254 Ops, 2, VLDMemInt->getMemoryVT(), 5255 VLDMemInt->getMemOperand()); 5256 5257 // Update the uses. 5258 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5259 UI != UE; ++UI) { 5260 unsigned ResNo = UI.getUse().getResNo(); 5261 // Ignore uses of the chain result. 5262 if (ResNo == NumVecs) 5263 continue; 5264 SDNode *User = *UI; 5265 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 5266 } 5267 5268 // Now the vldN-lane intrinsic is dead except for its chain result. 5269 // Update uses of the chain. 5270 std::vector<SDValue> VLDDupResults; 5271 for (unsigned n = 0; n < NumVecs; ++n) 5272 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 5273 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 5274 DCI.CombineTo(VLD, VLDDupResults); 5275 5276 return true; 5277} 5278 5279/// PerformVDUPLANECombine - Target-specific dag combine xforms for 5280/// ARMISD::VDUPLANE. 5281static SDValue PerformVDUPLANECombine(SDNode *N, 5282 TargetLowering::DAGCombinerInfo &DCI) { 5283 SDValue Op = N->getOperand(0); 5284 5285 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 5286 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 5287 if (CombineVLDDUP(N, DCI)) 5288 return SDValue(N, 0); 5289 5290 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 5291 // redundant. Ignore bit_converts for now; element sizes are checked below. 5292 while (Op.getOpcode() == ISD::BITCAST) 5293 Op = Op.getOperand(0); 5294 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 5295 return SDValue(); 5296 5297 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 5298 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 5299 // The canonical VMOV for a zero vector uses a 32-bit element size. 5300 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5301 unsigned EltBits; 5302 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 5303 EltSize = 8; 5304 EVT VT = N->getValueType(0); 5305 if (EltSize > VT.getVectorElementType().getSizeInBits()) 5306 return SDValue(); 5307 5308 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 5309} 5310 5311/// getVShiftImm - Check if this is a valid build_vector for the immediate 5312/// operand of a vector shift operation, where all the elements of the 5313/// build_vector must have the same constant integer value. 5314static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 5315 // Ignore bit_converts. 
5316 while (Op.getOpcode() == ISD::BITCAST) 5317 Op = Op.getOperand(0); 5318 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5319 APInt SplatBits, SplatUndef; 5320 unsigned SplatBitSize; 5321 bool HasAnyUndefs; 5322 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 5323 HasAnyUndefs, ElementBits) || 5324 SplatBitSize > ElementBits) 5325 return false; 5326 Cnt = SplatBits.getSExtValue(); 5327 return true; 5328} 5329 5330/// isVShiftLImm - Check if this is a valid build_vector for the immediate 5331/// operand of a vector shift left operation. That value must be in the range: 5332/// 0 <= Value < ElementBits for a left shift; or 5333/// 0 <= Value <= ElementBits for a long left shift. 5334static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 5335 assert(VT.isVector() && "vector shift count is not a vector type"); 5336 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5337 if (! getVShiftImm(Op, ElementBits, Cnt)) 5338 return false; 5339 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 5340} 5341 5342/// isVShiftRImm - Check if this is a valid build_vector for the immediate 5343/// operand of a vector shift right operation. For a shift opcode, the value 5344/// is positive, but for an intrinsic the value count must be negative. The 5345/// absolute value must be in the range: 5346/// 1 <= |Value| <= ElementBits for a right shift; or 5347/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 5348static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 5349 int64_t &Cnt) { 5350 assert(VT.isVector() && "vector shift count is not a vector type"); 5351 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5352 if (! getVShiftImm(Op, ElementBits, Cnt)) 5353 return false; 5354 if (isIntrinsic) 5355 Cnt = -Cnt; 5356 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 5357} 5358 5359/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 5360static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 5361 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 5362 switch (IntNo) { 5363 default: 5364 // Don't do anything for most intrinsics. 5365 break; 5366 5367 // Vector shifts: check for immediate versions and lower them. 5368 // Note: This is done during DAG combining instead of DAG legalizing because 5369 // the build_vectors for 64-bit vector element shift counts are generally 5370 // not legal, and it is hard to see their values after they get legalized to 5371 // loads from a constant pool. 
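  // Illustrative example (assumed, not part of the original comment):
  //   @llvm.arm.neon.vshifts.v4i32(%x, <i32 3, i32 3, i32 3, i32 3>)
  // is recognized below as an immediate left shift and becomes
  //   ARMISD::VSHL %x, #3   (vshl.i32)
  // while a splat of -3 would instead select ARMISD::VSHRs (vshr.s32 ..., #3).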
5372 case Intrinsic::arm_neon_vshifts: 5373 case Intrinsic::arm_neon_vshiftu: 5374 case Intrinsic::arm_neon_vshiftls: 5375 case Intrinsic::arm_neon_vshiftlu: 5376 case Intrinsic::arm_neon_vshiftn: 5377 case Intrinsic::arm_neon_vrshifts: 5378 case Intrinsic::arm_neon_vrshiftu: 5379 case Intrinsic::arm_neon_vrshiftn: 5380 case Intrinsic::arm_neon_vqshifts: 5381 case Intrinsic::arm_neon_vqshiftu: 5382 case Intrinsic::arm_neon_vqshiftsu: 5383 case Intrinsic::arm_neon_vqshiftns: 5384 case Intrinsic::arm_neon_vqshiftnu: 5385 case Intrinsic::arm_neon_vqshiftnsu: 5386 case Intrinsic::arm_neon_vqrshiftns: 5387 case Intrinsic::arm_neon_vqrshiftnu: 5388 case Intrinsic::arm_neon_vqrshiftnsu: { 5389 EVT VT = N->getOperand(1).getValueType(); 5390 int64_t Cnt; 5391 unsigned VShiftOpc = 0; 5392 5393 switch (IntNo) { 5394 case Intrinsic::arm_neon_vshifts: 5395 case Intrinsic::arm_neon_vshiftu: 5396 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 5397 VShiftOpc = ARMISD::VSHL; 5398 break; 5399 } 5400 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 5401 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 5402 ARMISD::VSHRs : ARMISD::VSHRu); 5403 break; 5404 } 5405 return SDValue(); 5406 5407 case Intrinsic::arm_neon_vshiftls: 5408 case Intrinsic::arm_neon_vshiftlu: 5409 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 5410 break; 5411 llvm_unreachable("invalid shift count for vshll intrinsic"); 5412 5413 case Intrinsic::arm_neon_vrshifts: 5414 case Intrinsic::arm_neon_vrshiftu: 5415 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 5416 break; 5417 return SDValue(); 5418 5419 case Intrinsic::arm_neon_vqshifts: 5420 case Intrinsic::arm_neon_vqshiftu: 5421 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5422 break; 5423 return SDValue(); 5424 5425 case Intrinsic::arm_neon_vqshiftsu: 5426 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5427 break; 5428 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 5429 5430 case Intrinsic::arm_neon_vshiftn: 5431 case Intrinsic::arm_neon_vrshiftn: 5432 case Intrinsic::arm_neon_vqshiftns: 5433 case Intrinsic::arm_neon_vqshiftnu: 5434 case Intrinsic::arm_neon_vqshiftnsu: 5435 case Intrinsic::arm_neon_vqrshiftns: 5436 case Intrinsic::arm_neon_vqrshiftnu: 5437 case Intrinsic::arm_neon_vqrshiftnsu: 5438 // Narrowing shifts require an immediate right shift. 5439 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 5440 break; 5441 llvm_unreachable("invalid shift count for narrowing vector shift " 5442 "intrinsic"); 5443 5444 default: 5445 llvm_unreachable("unhandled vector shift"); 5446 } 5447 5448 switch (IntNo) { 5449 case Intrinsic::arm_neon_vshifts: 5450 case Intrinsic::arm_neon_vshiftu: 5451 // Opcode already set above. 5452 break; 5453 case Intrinsic::arm_neon_vshiftls: 5454 case Intrinsic::arm_neon_vshiftlu: 5455 if (Cnt == VT.getVectorElementType().getSizeInBits()) 5456 VShiftOpc = ARMISD::VSHLLi; 5457 else 5458 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
5459 ARMISD::VSHLLs : ARMISD::VSHLLu); 5460 break; 5461 case Intrinsic::arm_neon_vshiftn: 5462 VShiftOpc = ARMISD::VSHRN; break; 5463 case Intrinsic::arm_neon_vrshifts: 5464 VShiftOpc = ARMISD::VRSHRs; break; 5465 case Intrinsic::arm_neon_vrshiftu: 5466 VShiftOpc = ARMISD::VRSHRu; break; 5467 case Intrinsic::arm_neon_vrshiftn: 5468 VShiftOpc = ARMISD::VRSHRN; break; 5469 case Intrinsic::arm_neon_vqshifts: 5470 VShiftOpc = ARMISD::VQSHLs; break; 5471 case Intrinsic::arm_neon_vqshiftu: 5472 VShiftOpc = ARMISD::VQSHLu; break; 5473 case Intrinsic::arm_neon_vqshiftsu: 5474 VShiftOpc = ARMISD::VQSHLsu; break; 5475 case Intrinsic::arm_neon_vqshiftns: 5476 VShiftOpc = ARMISD::VQSHRNs; break; 5477 case Intrinsic::arm_neon_vqshiftnu: 5478 VShiftOpc = ARMISD::VQSHRNu; break; 5479 case Intrinsic::arm_neon_vqshiftnsu: 5480 VShiftOpc = ARMISD::VQSHRNsu; break; 5481 case Intrinsic::arm_neon_vqrshiftns: 5482 VShiftOpc = ARMISD::VQRSHRNs; break; 5483 case Intrinsic::arm_neon_vqrshiftnu: 5484 VShiftOpc = ARMISD::VQRSHRNu; break; 5485 case Intrinsic::arm_neon_vqrshiftnsu: 5486 VShiftOpc = ARMISD::VQRSHRNsu; break; 5487 } 5488 5489 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5490 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 5491 } 5492 5493 case Intrinsic::arm_neon_vshiftins: { 5494 EVT VT = N->getOperand(1).getValueType(); 5495 int64_t Cnt; 5496 unsigned VShiftOpc = 0; 5497 5498 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 5499 VShiftOpc = ARMISD::VSLI; 5500 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 5501 VShiftOpc = ARMISD::VSRI; 5502 else { 5503 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 5504 } 5505 5506 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5507 N->getOperand(1), N->getOperand(2), 5508 DAG.getConstant(Cnt, MVT::i32)); 5509 } 5510 5511 case Intrinsic::arm_neon_vqrshifts: 5512 case Intrinsic::arm_neon_vqrshiftu: 5513 // No immediate versions of these to check for. 5514 break; 5515 } 5516 5517 return SDValue(); 5518} 5519 5520/// PerformShiftCombine - Checks for immediate versions of vector shifts and 5521/// lowers them. As with the vector shift intrinsics, this is done during DAG 5522/// combining instead of DAG legalizing because the build_vectors for 64-bit 5523/// vector element shift counts are generally not legal, and it is hard to see 5524/// their values after they get legalized to loads from a constant pool. 5525static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 5526 const ARMSubtarget *ST) { 5527 EVT VT = N->getValueType(0); 5528 5529 // Nothing to be done for scalar shifts. 5530 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5531 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 5532 return SDValue(); 5533 5534 assert(ST->hasNEON() && "unexpected vector shift"); 5535 int64_t Cnt; 5536 5537 switch (N->getOpcode()) { 5538 default: llvm_unreachable("unexpected shift opcode"); 5539 5540 case ISD::SHL: 5541 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 5542 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 5543 DAG.getConstant(Cnt, MVT::i32)); 5544 break; 5545 5546 case ISD::SRA: 5547 case ISD::SRL: 5548 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 5549 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
5550 ARMISD::VSHRs : ARMISD::VSHRu); 5551 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 5552 DAG.getConstant(Cnt, MVT::i32)); 5553 } 5554 } 5555 return SDValue(); 5556} 5557 5558/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 5559/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 5560static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 5561 const ARMSubtarget *ST) { 5562 SDValue N0 = N->getOperand(0); 5563 5564 // Check for sign- and zero-extensions of vector extract operations of 8- 5565 // and 16-bit vector elements. NEON supports these directly. They are 5566 // handled during DAG combining because type legalization will promote them 5567 // to 32-bit types and it is messy to recognize the operations after that. 5568 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 5569 SDValue Vec = N0.getOperand(0); 5570 SDValue Lane = N0.getOperand(1); 5571 EVT VT = N->getValueType(0); 5572 EVT EltVT = N0.getValueType(); 5573 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5574 5575 if (VT == MVT::i32 && 5576 (EltVT == MVT::i8 || EltVT == MVT::i16) && 5577 TLI.isTypeLegal(Vec.getValueType()) && 5578 isa<ConstantSDNode>(Lane)) { 5579 5580 unsigned Opc = 0; 5581 switch (N->getOpcode()) { 5582 default: llvm_unreachable("unexpected opcode"); 5583 case ISD::SIGN_EXTEND: 5584 Opc = ARMISD::VGETLANEs; 5585 break; 5586 case ISD::ZERO_EXTEND: 5587 case ISD::ANY_EXTEND: 5588 Opc = ARMISD::VGETLANEu; 5589 break; 5590 } 5591 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 5592 } 5593 } 5594 5595 return SDValue(); 5596} 5597 5598/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 5599/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 5600static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 5601 const ARMSubtarget *ST) { 5602 // If the target supports NEON, try to use vmax/vmin instructions for f32 5603 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 5604 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 5605 // a NaN; only do the transformation when it matches that behavior. 5606 5607 // For now only do this when using NEON for FP operations; if using VFP, it 5608 // is not obvious that the benefit outweighs the cost of switching to the 5609 // NEON pipeline. 5610 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 5611 N->getValueType(0) != MVT::f32) 5612 return SDValue(); 5613 5614 SDValue CondLHS = N->getOperand(0); 5615 SDValue CondRHS = N->getOperand(1); 5616 SDValue LHS = N->getOperand(2); 5617 SDValue RHS = N->getOperand(3); 5618 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 5619 5620 unsigned Opcode = 0; 5621 bool IsReversed; 5622 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 5623 IsReversed = false; // x CC y ? x : y 5624 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 5625 IsReversed = true ; // x CC y ? y : x 5626 } else { 5627 return SDValue(); 5628 } 5629 5630 bool IsUnordered; 5631 switch (CC) { 5632 default: break; 5633 case ISD::SETOLT: 5634 case ISD::SETOLE: 5635 case ISD::SETLT: 5636 case ISD::SETLE: 5637 case ISD::SETULT: 5638 case ISD::SETULE: 5639 // If LHS is NaN, an ordered comparison will be false and the result will 5640 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 5641 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
5642 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 5643 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5644 break; 5645 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 5646 // will return -0, so vmin can only be used for unsafe math or if one of 5647 // the operands is known to be nonzero. 5648 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 5649 !UnsafeFPMath && 5650 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5651 break; 5652 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 5653 break; 5654 5655 case ISD::SETOGT: 5656 case ISD::SETOGE: 5657 case ISD::SETGT: 5658 case ISD::SETGE: 5659 case ISD::SETUGT: 5660 case ISD::SETUGE: 5661 // If LHS is NaN, an ordered comparison will be false and the result will 5662 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 5663 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 5664 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 5665 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5666 break; 5667 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 5668 // will return +0, so vmax can only be used for unsafe math or if one of 5669 // the operands is known to be nonzero. 5670 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 5671 !UnsafeFPMath && 5672 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5673 break; 5674 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 5675 break; 5676 } 5677 5678 if (!Opcode) 5679 return SDValue(); 5680 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 5681} 5682 5683SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 5684 DAGCombinerInfo &DCI) const { 5685 switch (N->getOpcode()) { 5686 default: break; 5687 case ISD::ADD: return PerformADDCombine(N, DCI); 5688 case ISD::SUB: return PerformSUBCombine(N, DCI); 5689 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 5690 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 5691 case ISD::AND: return PerformANDCombine(N, DCI); 5692 case ARMISD::BFI: return PerformBFICombine(N, DCI); 5693 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 5694 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 5695 case ISD::STORE: return PerformSTORECombine(N, DCI); 5696 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 5697 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 5698 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 5699 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 5700 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 5701 case ISD::SHL: 5702 case ISD::SRA: 5703 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 5704 case ISD::SIGN_EXTEND: 5705 case ISD::ZERO_EXTEND: 5706 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 5707 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 5708 } 5709 return SDValue(); 5710} 5711 5712bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 5713 if (!Subtarget->allowsUnalignedMem()) 5714 return false; 5715 5716 switch (VT.getSimpleVT().SimpleTy) { 5717 default: 5718 return false; 5719 case MVT::i8: 5720 case MVT::i16: 5721 case MVT::i32: 5722 return true; 5723 // FIXME: VLD1 etc with standard alignment is legal. 
5724 } 5725} 5726 5727static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 5728 if (V < 0) 5729 return false; 5730 5731 unsigned Scale = 1; 5732 switch (VT.getSimpleVT().SimpleTy) { 5733 default: return false; 5734 case MVT::i1: 5735 case MVT::i8: 5736 // Scale == 1; 5737 break; 5738 case MVT::i16: 5739 // Scale == 2; 5740 Scale = 2; 5741 break; 5742 case MVT::i32: 5743 // Scale == 4; 5744 Scale = 4; 5745 break; 5746 } 5747 5748 if ((V & (Scale - 1)) != 0) 5749 return false; 5750 V /= Scale; 5751 return V == (V & ((1LL << 5) - 1)); 5752} 5753 5754static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 5755 const ARMSubtarget *Subtarget) { 5756 bool isNeg = false; 5757 if (V < 0) { 5758 isNeg = true; 5759 V = - V; 5760 } 5761 5762 switch (VT.getSimpleVT().SimpleTy) { 5763 default: return false; 5764 case MVT::i1: 5765 case MVT::i8: 5766 case MVT::i16: 5767 case MVT::i32: 5768 // + imm12 or - imm8 5769 if (isNeg) 5770 return V == (V & ((1LL << 8) - 1)); 5771 return V == (V & ((1LL << 12) - 1)); 5772 case MVT::f32: 5773 case MVT::f64: 5774 // Same as ARM mode. FIXME: NEON? 5775 if (!Subtarget->hasVFP2()) 5776 return false; 5777 if ((V & 3) != 0) 5778 return false; 5779 V >>= 2; 5780 return V == (V & ((1LL << 8) - 1)); 5781 } 5782} 5783 5784/// isLegalAddressImmediate - Return true if the integer value can be used 5785/// as the offset of the target addressing mode for load / store of the 5786/// given type. 5787static bool isLegalAddressImmediate(int64_t V, EVT VT, 5788 const ARMSubtarget *Subtarget) { 5789 if (V == 0) 5790 return true; 5791 5792 if (!VT.isSimple()) 5793 return false; 5794 5795 if (Subtarget->isThumb1Only()) 5796 return isLegalT1AddressImmediate(V, VT); 5797 else if (Subtarget->isThumb2()) 5798 return isLegalT2AddressImmediate(V, VT, Subtarget); 5799 5800 // ARM mode. 5801 if (V < 0) 5802 V = - V; 5803 switch (VT.getSimpleVT().SimpleTy) { 5804 default: return false; 5805 case MVT::i1: 5806 case MVT::i8: 5807 case MVT::i32: 5808 // +- imm12 5809 return V == (V & ((1LL << 12) - 1)); 5810 case MVT::i16: 5811 // +- imm8 5812 return V == (V & ((1LL << 8) - 1)); 5813 case MVT::f32: 5814 case MVT::f64: 5815 if (!Subtarget->hasVFP2()) // FIXME: NEON? 5816 return false; 5817 if ((V & 3) != 0) 5818 return false; 5819 V >>= 2; 5820 return V == (V & ((1LL << 8) - 1)); 5821 } 5822} 5823 5824bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 5825 EVT VT) const { 5826 int Scale = AM.Scale; 5827 if (Scale < 0) 5828 return false; 5829 5830 switch (VT.getSimpleVT().SimpleTy) { 5831 default: return false; 5832 case MVT::i1: 5833 case MVT::i8: 5834 case MVT::i16: 5835 case MVT::i32: 5836 if (Scale == 1) 5837 return true; 5838 // r + r << imm 5839 Scale = Scale & ~1; 5840 return Scale == 2 || Scale == 4 || Scale == 8; 5841 case MVT::i64: 5842 // r + r 5843 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5844 return true; 5845 return false; 5846 case MVT::isVoid: 5847 // Note, we allow "void" uses (basically, uses that aren't loads or 5848 // stores), because arm allows folding a scale into many arithmetic 5849 // operations. This should be made more precise and revisited later. 5850 5851 // Allow r << imm, but the imm has to be a multiple of two. 5852 if (Scale & 1) return false; 5853 return isPowerOf2_32(Scale); 5854 } 5855} 5856 5857/// isLegalAddressingMode - Return true if the addressing mode represented 5858/// by AM is legal for this target, for a load/store of the specified type. 
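/// For example (illustrative): in ARM mode, [r0, #1020] and [r0, r1, lsl #2]
/// are accepted for an i32 load, while a combined reg+scaled-reg+imm form
/// ([r0, r1, lsl #2] plus a nonzero base offset) is rejected below.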
5859bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5860 const Type *Ty) const { 5861 EVT VT = getValueType(Ty, true); 5862 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 5863 return false; 5864 5865 // Can never fold addr of global into load/store. 5866 if (AM.BaseGV) 5867 return false; 5868 5869 switch (AM.Scale) { 5870 case 0: // no scale reg, must be "r+i" or "r", or "i". 5871 break; 5872 case 1: 5873 if (Subtarget->isThumb1Only()) 5874 return false; 5875 // FALL THROUGH. 5876 default: 5877 // ARM doesn't support any R+R*scale+imm addr modes. 5878 if (AM.BaseOffs) 5879 return false; 5880 5881 if (!VT.isSimple()) 5882 return false; 5883 5884 if (Subtarget->isThumb2()) 5885 return isLegalT2ScaledAddressingMode(AM, VT); 5886 5887 int Scale = AM.Scale; 5888 switch (VT.getSimpleVT().SimpleTy) { 5889 default: return false; 5890 case MVT::i1: 5891 case MVT::i8: 5892 case MVT::i32: 5893 if (Scale < 0) Scale = -Scale; 5894 if (Scale == 1) 5895 return true; 5896 // r + r << imm 5897 return isPowerOf2_32(Scale & ~1); 5898 case MVT::i16: 5899 case MVT::i64: 5900 // r + r 5901 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5902 return true; 5903 return false; 5904 5905 case MVT::isVoid: 5906 // Note, we allow "void" uses (basically, uses that aren't loads or 5907 // stores), because arm allows folding a scale into many arithmetic 5908 // operations. This should be made more precise and revisited later. 5909 5910 // Allow r << imm, but the imm has to be a multiple of two. 5911 if (Scale & 1) return false; 5912 return isPowerOf2_32(Scale); 5913 } 5914 break; 5915 } 5916 return true; 5917} 5918 5919/// isLegalICmpImmediate - Return true if the specified immediate is legal 5920/// icmp immediate, that is the target has icmp instructions which can compare 5921/// a register against the immediate without having to materialize the 5922/// immediate into a register. 
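/// For example (illustrative): in ARM mode, #255 and #0xff00 are valid
/// compare immediates (an 8-bit value rotated right by an even amount),
/// while #0x101 is not; Thumb1 only accepts immediates in the range 0-255.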
5923bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 5924 if (!Subtarget->isThumb()) 5925 return ARM_AM::getSOImmVal(Imm) != -1; 5926 if (Subtarget->isThumb2()) 5927 return ARM_AM::getT2SOImmVal(Imm) != -1; 5928 return Imm >= 0 && Imm <= 255; 5929} 5930 5931static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 5932 bool isSEXTLoad, SDValue &Base, 5933 SDValue &Offset, bool &isInc, 5934 SelectionDAG &DAG) { 5935 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5936 return false; 5937 5938 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 5939 // AddressingMode 3 5940 Base = Ptr->getOperand(0); 5941 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5942 int RHSC = (int)RHS->getZExtValue(); 5943 if (RHSC < 0 && RHSC > -256) { 5944 assert(Ptr->getOpcode() == ISD::ADD); 5945 isInc = false; 5946 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5947 return true; 5948 } 5949 } 5950 isInc = (Ptr->getOpcode() == ISD::ADD); 5951 Offset = Ptr->getOperand(1); 5952 return true; 5953 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 5954 // AddressingMode 2 5955 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5956 int RHSC = (int)RHS->getZExtValue(); 5957 if (RHSC < 0 && RHSC > -0x1000) { 5958 assert(Ptr->getOpcode() == ISD::ADD); 5959 isInc = false; 5960 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5961 Base = Ptr->getOperand(0); 5962 return true; 5963 } 5964 } 5965 5966 if (Ptr->getOpcode() == ISD::ADD) { 5967 isInc = true; 5968 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 5969 if (ShOpcVal != ARM_AM::no_shift) { 5970 Base = Ptr->getOperand(1); 5971 Offset = Ptr->getOperand(0); 5972 } else { 5973 Base = Ptr->getOperand(0); 5974 Offset = Ptr->getOperand(1); 5975 } 5976 return true; 5977 } 5978 5979 isInc = (Ptr->getOpcode() == ISD::ADD); 5980 Base = Ptr->getOperand(0); 5981 Offset = Ptr->getOperand(1); 5982 return true; 5983 } 5984 5985 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 5986 return false; 5987} 5988 5989static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 5990 bool isSEXTLoad, SDValue &Base, 5991 SDValue &Offset, bool &isInc, 5992 SelectionDAG &DAG) { 5993 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5994 return false; 5995 5996 Base = Ptr->getOperand(0); 5997 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5998 int RHSC = (int)RHS->getZExtValue(); 5999 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 6000 assert(Ptr->getOpcode() == ISD::ADD); 6001 isInc = false; 6002 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 6003 return true; 6004 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 6005 isInc = Ptr->getOpcode() == ISD::ADD; 6006 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 6007 return true; 6008 } 6009 } 6010 6011 return false; 6012} 6013 6014/// getPreIndexedAddressParts - returns true by value, base pointer and 6015/// offset pointer and addressing mode by reference if the node's address 6016/// can be legally represented as pre-indexed load / store address. 
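/// For example (illustrative): a load from (add ptr, #4) can be reported as
/// base = ptr, offset = #4, AM = ISD::PRE_INC, allowing the selector to form
/// a pre-indexed "ldr r0, [r1, #4]!" that also produces the updated base.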
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}
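// For illustration (a sketch): when these hooks succeed, DAG combining can
// fold the pointer update into the memory operation itself:
//   pre-indexed:  ldr r0, [r1, #4]!   @ r1 is updated before the access
//   post-indexed: ldr r0, [r1], #4    @ r1 is updated after the access
// Thumb1 has no writeback forms, hence the early bail-outs above.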
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne &= KnownOneRHS;
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}
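// For illustration, the kind of user code ExpandInlineAsm above rewrites
// into llvm.bswap.i32 (a sketch; the variable names are hypothetical):
//   unsigned Swapped;
//   __asm__("rev %0, %1" : "=l"(Swapped) : "l"(Value));
// The single "rev $0, $1" statement and the "=l,l" constraint string are
// exactly what the matcher checks for, and the result type must be i32.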
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  const Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
      if (Subtarget->isThumb())
        return std::make_pair(0U, ARM::tGPRRegisterClass);
      else
        return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, ARM::QPRRegisterClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 0);
  case 'r':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                   ARM::S12,ARM::S13,ARM::S14,ARM::S15,
                                   ARM::S16,ARM::S17,ARM::S18,ARM::S19,
                                   ARM::S20,ARM::S21,ARM::S22,ARM::S23,
                                   ARM::S24,ARM::S25,ARM::S26,ARM::S27,
                                   ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
    if (VT.getSizeInBits() == 64)
      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
                                   ARM::D8, ARM::D9, ARM::D10,ARM::D11,
                                   ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
    if (VT.getSizeInBits() == 128)
      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
    break;
  }

  return std::vector<unsigned>();
}
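// For illustration (a sketch): "l" selects the Thumb low registers r0-r7
// (the full GPR set in ARM mode), "r" the general-purpose registers, and
// "w" the VFP register class matching the operand size (S, D or Q), so a
// float operand such as
//   __asm__("vneg.f32 %0, %1" : "=w"(Out) : "w"(In));
// is allocated an S register.  Out and In are hypothetical names.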
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (Constraint) {
    case 'I':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between 0 and 255, for ADD
        // immediates.
        if (CVal >= 0 && CVal <= 255)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getT2SOImmVal(CVal) != -1)
          break;
      } else {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getSOImmVal(CVal) != -1)
          break;
      }
      return;

    case 'J':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between -255 and -1, for negated ADD
        // immediates. This can be used in GCC with an "n" modifier that
        // prints the negated value, for use with SUB instructions. It is
        // not useful otherwise but is implemented for compatibility.
        if (CVal >= -255 && CVal <= -1)
          break;
      } else {
        // This must be a constant between -4095 and 4095. It is not clear
        // what this constraint is intended for. Implemented for
        // compatibility with GCC.
        if (CVal >= -4095 && CVal <= 4095)
          break;
      }
      return;

    case 'K':
      if (Subtarget->isThumb1Only()) {
        // A 32-bit value where only one byte has a nonzero value. Exclude
        // zero to match GCC. This constraint is used by GCC internally for
        // constants that can be loaded with a move/shift combination.
        // It is not useful otherwise but is implemented for compatibility.
        if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction. This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions. It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getT2SOImmVal(~CVal) != -1)
          break;
      } else {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction. This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions. It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getSOImmVal(~CVal) != -1)
          break;
      }
      return;
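    // For illustration (a sketch): in ARM mode 'I' accepts 0xff000000, an
    // 8-bit value rotated right by an even amount (a legal shifter
    // operand), while 'K' accepts 0x00ffffff because its bitwise inverse
    // is that same shifter operand, making it usable with BIC and MVN.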
    case 'L':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between -7 and 7,
        // for 3-operand ADD/SUB immediate instructions.
        if (CVal >= -7 && CVal < 7)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction. This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions. It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getT2SOImmVal(-CVal) != -1)
          break;
      } else {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction. This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions. It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getSOImmVal(-CVal) != -1)
          break;
      }
      return;

    case 'M':
      if (Subtarget->isThumb()) { // FIXME thumb2
        // This must be a multiple of 4 between 0 and 1020, for
        // ADD sp + immediate.
        if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
          break;
      } else {
        // A power of two or a constant between 0 and 32.  This is used in
        // GCC for the shift amount on shifted register operands, but it is
        // useful in general for any shift amounts.
        if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
          break;
      }
      return;

    case 'N':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between 0 and 31, for shift amounts.
        if (CVal >= 0 && CVal <= 31)
          break;
      }
      return;

    case 'O':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a multiple of 4 between -508 and 508, for
        // ADD/SUB sp = sp + immediate.
        if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
          break;
      }
      return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}
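// Worked example (a sketch): 1.0f has the bit pattern 0x3f800000, giving
// Sign = 0, a biased exponent of 0x7f (Exp = 0) and a zero mantissa.  Both
// rejection tests pass, Exp becomes ((0+3) & 0x7) ^ 4 = 7, and the result is
// (0 << 7) | (7 << 4) | 0 = 0x70, the VFPv3 imm8 encoding that
// "vmov.f32 s0, #1.0" uses.  A constant such as 0.1f fails the mantissa
// check and yields -1.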
int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return 0;
  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return 0;
  }
  return 1;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}
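// For illustration (a sketch): isBitFieldInvertedMask(0xffff00ff) is true,
// since "and r0, r0, #0xffff00ff" can instead be selected as the bit-field
// clear "bfc r0, #8, #8" on ARMv6T2 and later, whereas 0x00ff0000 has set
// bits strictly inside the word and is rejected.  Similarly, with VFP3,
// isFPImmLegal accepts 1.0 and 0.5 (imm8 encodings 0x70 and 0x60) but not
// 0.1, which the legalizer loads from the constant pool instead.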
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
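// For illustration (a sketch, with an assumed overload of the intrinsic):
// a call to @llvm.arm.neon.vld2.v4i16 returns { <4 x i16>, <4 x i16> },
// 16 bytes in total, so the code above records memVT = v2i64, wide enough
// to cover both vectors, together with the pointer operand and the
// alignment taken from the intrinsic's trailing argument.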