ARMISelLowering.cpp revision 0521928ae7cc492f3f45ef0e0cedc349102489c5
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that ARM uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "arm-isel" 16#include "ARM.h" 17#include "ARMAddressingModes.h" 18#include "ARMCallingConv.h" 19#include "ARMConstantPoolValue.h" 20#include "ARMISelLowering.h" 21#include "ARMMachineFunctionInfo.h" 22#include "ARMPerfectShuffle.h" 23#include "ARMRegisterInfo.h" 24#include "ARMSubtarget.h" 25#include "ARMTargetMachine.h" 26#include "ARMTargetObjectFile.h" 27#include "llvm/CallingConv.h" 28#include "llvm/Constants.h" 29#include "llvm/Function.h" 30#include "llvm/GlobalValue.h" 31#include "llvm/Instruction.h" 32#include "llvm/Instructions.h" 33#include "llvm/Intrinsics.h" 34#include "llvm/Type.h" 35#include "llvm/CodeGen/CallingConvLower.h" 36#include "llvm/CodeGen/MachineBasicBlock.h" 37#include "llvm/CodeGen/MachineFrameInfo.h" 38#include "llvm/CodeGen/MachineFunction.h" 39#include "llvm/CodeGen/MachineInstrBuilder.h" 40#include "llvm/CodeGen/MachineRegisterInfo.h" 41#include "llvm/CodeGen/PseudoSourceValue.h" 42#include "llvm/CodeGen/SelectionDAG.h" 43#include "llvm/MC/MCSectionMachO.h" 44#include "llvm/Target/TargetOptions.h" 45#include "llvm/ADT/VectorExtras.h" 46#include "llvm/ADT/Statistic.h" 47#include "llvm/Support/CommandLine.h" 48#include "llvm/Support/ErrorHandling.h" 49#include "llvm/Support/MathExtras.h" 50#include "llvm/Support/raw_ostream.h" 51#include <sstream> 52using namespace llvm; 53 54STATISTIC(NumTailCalls, "Number of tail calls"); 55 56// This option should go away when tail calls fully work. 
57static cl::opt<bool> 58EnableARMTailCalls("arm-tail-calls", cl::Hidden, 59 cl::desc("Generate tail calls (TEMPORARY OPTION)."), 60 cl::init(false)); 61 62cl::opt<bool> 63EnableARMLongCalls("arm-long-calls", cl::Hidden, 64 cl::desc("Generate calls via indirect call instructions"), 65 cl::init(false)); 66 67static cl::opt<bool> 68ARMInterworking("arm-interworking", cl::Hidden, 69 cl::desc("Enable / disable ARM interworking (for debugging only)"), 70 cl::init(true)); 71 72void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, 73 EVT PromotedBitwiseVT) { 74 if (VT != PromotedLdStVT) { 75 setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote); 76 AddPromotedToType (ISD::LOAD, VT.getSimpleVT(), 77 PromotedLdStVT.getSimpleVT()); 78 79 setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); 80 AddPromotedToType (ISD::STORE, VT.getSimpleVT(), 81 PromotedLdStVT.getSimpleVT()); 82 } 83 84 EVT ElemTy = VT.getVectorElementType(); 85 if (ElemTy != MVT::i64 && ElemTy != MVT::f64) 86 setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom); 87 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom); 88 if (ElemTy != MVT::i32) { 89 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand); 90 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand); 91 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand); 92 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand); 93 } 94 setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom); 95 setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom); 96 setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal); 97 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand); 98 setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand); 99 setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand); 100 if (VT.isInteger()) { 101 setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom); 102 setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom); 103 setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom); 104 setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand); 105 setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand); 106 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 107 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 108 setTruncStoreAction(VT.getSimpleVT(), 109 (MVT::SimpleValueType)InnerVT, Expand); 110 } 111 setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand); 112 113 // Promote all bit-wise operations. 114 if (VT.isInteger() && VT != PromotedBitwiseVT) { 115 setOperationAction(ISD::AND, VT.getSimpleVT(), Promote); 116 AddPromotedToType (ISD::AND, VT.getSimpleVT(), 117 PromotedBitwiseVT.getSimpleVT()); 118 setOperationAction(ISD::OR, VT.getSimpleVT(), Promote); 119 AddPromotedToType (ISD::OR, VT.getSimpleVT(), 120 PromotedBitwiseVT.getSimpleVT()); 121 setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote); 122 AddPromotedToType (ISD::XOR, VT.getSimpleVT(), 123 PromotedBitwiseVT.getSimpleVT()); 124 } 125 126 // Neon does not support vector divide/remainder operations. 
127 setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand); 128 setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand); 129 setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand); 130 setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand); 131 setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand); 132 setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand); 133} 134 135void ARMTargetLowering::addDRTypeForNEON(EVT VT) { 136 addRegisterClass(VT, ARM::DPRRegisterClass); 137 addTypeForNEON(VT, MVT::f64, MVT::v2i32); 138} 139 140void ARMTargetLowering::addQRTypeForNEON(EVT VT) { 141 addRegisterClass(VT, ARM::QPRRegisterClass); 142 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); 143} 144 145static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) { 146 if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin()) 147 return new TargetLoweringObjectFileMachO(); 148 149 return new ARMElfTargetObjectFile(); 150} 151 152ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) 153 : TargetLowering(TM, createTLOF(TM)) { 154 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 155 RegInfo = TM.getRegisterInfo(); 156 Itins = TM.getInstrItineraryData(); 157 158 if (Subtarget->isTargetDarwin()) { 159 // Uses VFP for Thumb libfuncs if available. 160 if (Subtarget->isThumb() && Subtarget->hasVFP2()) { 161 // Single-precision floating-point arithmetic. 162 setLibcallName(RTLIB::ADD_F32, "__addsf3vfp"); 163 setLibcallName(RTLIB::SUB_F32, "__subsf3vfp"); 164 setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp"); 165 setLibcallName(RTLIB::DIV_F32, "__divsf3vfp"); 166 167 // Double-precision floating-point arithmetic. 168 setLibcallName(RTLIB::ADD_F64, "__adddf3vfp"); 169 setLibcallName(RTLIB::SUB_F64, "__subdf3vfp"); 170 setLibcallName(RTLIB::MUL_F64, "__muldf3vfp"); 171 setLibcallName(RTLIB::DIV_F64, "__divdf3vfp"); 172 173 // Single-precision comparisons. 174 setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp"); 175 setLibcallName(RTLIB::UNE_F32, "__nesf2vfp"); 176 setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp"); 177 setLibcallName(RTLIB::OLE_F32, "__lesf2vfp"); 178 setLibcallName(RTLIB::OGE_F32, "__gesf2vfp"); 179 setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp"); 180 setLibcallName(RTLIB::UO_F32, "__unordsf2vfp"); 181 setLibcallName(RTLIB::O_F32, "__unordsf2vfp"); 182 183 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 184 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE); 185 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 186 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 187 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 188 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 189 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 190 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 191 192 // Double-precision comparisons. 193 setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp"); 194 setLibcallName(RTLIB::UNE_F64, "__nedf2vfp"); 195 setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp"); 196 setLibcallName(RTLIB::OLE_F64, "__ledf2vfp"); 197 setLibcallName(RTLIB::OGE_F64, "__gedf2vfp"); 198 setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp"); 199 setLibcallName(RTLIB::UO_F64, "__unorddf2vfp"); 200 setLibcallName(RTLIB::O_F64, "__unorddf2vfp"); 201 202 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 203 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE); 204 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 205 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 206 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 207 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 208 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 209 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 210 211 // Floating-point to integer conversions. 
212 // i64 conversions are done via library routines even when generating VFP 213 // instructions, so use the same ones. 214 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp"); 215 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp"); 216 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp"); 217 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp"); 218 219 // Conversions between floating types. 220 setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp"); 221 setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp"); 222 223 // Integer to floating-point conversions. 224 // i64 conversions are done via library routines even when generating VFP 225 // instructions, so use the same ones. 226 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 227 // e.g., __floatunsidf vs. __floatunssidfvfp. 228 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp"); 229 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp"); 230 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp"); 231 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp"); 232 } 233 } 234 235 // These libcalls are not available in 32-bit. 236 setLibcallName(RTLIB::SHL_I128, 0); 237 setLibcallName(RTLIB::SRL_I128, 0); 238 setLibcallName(RTLIB::SRA_I128, 0); 239 240 if (Subtarget->isAAPCS_ABI()) { 241 // Double-precision floating-point arithmetic helper functions 242 // RTABI chapter 4.1.2, Table 2 243 setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd"); 244 setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv"); 245 setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul"); 246 setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub"); 247 setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS); 248 setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS); 249 setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS); 250 setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS); 251 252 // Double-precision floating-point comparison helper functions 253 // RTABI chapter 4.1.2, Table 3 254 setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq"); 255 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 256 setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq"); 257 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ); 258 setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt"); 259 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 260 setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple"); 261 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 262 setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge"); 263 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 264 setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt"); 265 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 266 setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun"); 267 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 268 setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun"); 269 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 270 setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS); 271 setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS); 272 setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS); 273 setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS); 274 setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS); 275 setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS); 276 setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS); 277 setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS); 278 279 // Single-precision floating-point arithmetic helper functions 280 // RTABI chapter 4.1.2, Table 4 281 setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd"); 282 setLibcallName(RTLIB::DIV_F32, 
"__aeabi_fdiv"); 283 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 284 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 285 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 286 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 287 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 288 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 289 290 // Single-precision floating-point comparison helper functions 291 // RTABI chapter 4.1.2, Table 5 292 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 293 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 294 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 295 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 296 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 297 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 298 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 299 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 300 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 301 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 302 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 303 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 304 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 305 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 306 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 307 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 308 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 309 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 310 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 311 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 313 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 314 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 315 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 316 317 // Floating-point to integer conversions. 318 // RTABI chapter 4.1.2, Table 6 319 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 320 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 321 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 322 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 323 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 324 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 325 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 326 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 327 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 328 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 329 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 330 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 331 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 332 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 333 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 334 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 335 336 // Conversions between floating types. 337 // RTABI chapter 4.1.2, Table 7 338 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 339 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 340 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 342 343 // Integer to floating-point conversions. 
344 // RTABI chapter 4.1.2, Table 8 345 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d"); 346 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d"); 347 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d"); 348 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d"); 349 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f"); 350 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f"); 351 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f"); 352 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f"); 353 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 354 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 355 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 356 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 357 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 358 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 359 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 360 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 361 362 // Long long helper functions 363 // RTABI chapter 4.2, Table 9 364 setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul"); 365 setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod"); 366 setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod"); 367 setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl"); 368 setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr"); 369 setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr"); 370 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS); 371 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS); 372 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS); 373 setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS); 374 setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS); 375 setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS); 376 377 // Integer division functions 378 // RTABI chapter 4.3.1 379 setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv"); 380 setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv"); 381 setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv"); 382 setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv"); 383 setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv"); 384 setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv"); 385 setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS); 386 setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS); 387 setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS); 388 setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS); 389 setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS); 390 setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS); 391 } 392 393 if (Subtarget->isThumb1Only()) 394 addRegisterClass(MVT::i32, ARM::tGPRRegisterClass); 395 else 396 addRegisterClass(MVT::i32, ARM::GPRRegisterClass); 397 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 398 addRegisterClass(MVT::f32, ARM::SPRRegisterClass); 399 if (!Subtarget->isFPOnlySP()) 400 addRegisterClass(MVT::f64, ARM::DPRRegisterClass); 401 402 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 403 } 404 405 if (Subtarget->hasNEON()) { 406 addDRTypeForNEON(MVT::v2f32); 407 addDRTypeForNEON(MVT::v8i8); 408 addDRTypeForNEON(MVT::v4i16); 409 addDRTypeForNEON(MVT::v2i32); 410 addDRTypeForNEON(MVT::v1i64); 411 412 addQRTypeForNEON(MVT::v4f32); 413 addQRTypeForNEON(MVT::v2f64); 414 addQRTypeForNEON(MVT::v16i8); 415 addQRTypeForNEON(MVT::v8i16); 416 addQRTypeForNEON(MVT::v4i32); 417 
addQRTypeForNEON(MVT::v2i64); 418 419 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 420 // neither Neon nor VFP support any arithmetic operations on it. 421 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 422 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 423 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 424 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 425 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 426 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 427 setOperationAction(ISD::VSETCC, MVT::v2f64, Expand); 428 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 429 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 430 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 431 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 432 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 433 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand); 434 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 435 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 436 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 437 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 438 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 439 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 440 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 441 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 442 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 443 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 444 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 445 446 setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand); 447 448 // Neon does not support some operations on v1i64 and v2i64 types. 449 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 450 // Custom handling for some quad-vector types to detect VMULL. 451 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 452 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 453 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 454 setOperationAction(ISD::VSETCC, MVT::v1i64, Expand); 455 setOperationAction(ISD::VSETCC, MVT::v2i64, Expand); 456 457 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 458 setTargetDAGCombine(ISD::SHL); 459 setTargetDAGCombine(ISD::SRL); 460 setTargetDAGCombine(ISD::SRA); 461 setTargetDAGCombine(ISD::SIGN_EXTEND); 462 setTargetDAGCombine(ISD::ZERO_EXTEND); 463 setTargetDAGCombine(ISD::ANY_EXTEND); 464 setTargetDAGCombine(ISD::SELECT_CC); 465 setTargetDAGCombine(ISD::BUILD_VECTOR); 466 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 467 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 468 setTargetDAGCombine(ISD::STORE); 469 } 470 471 computeRegisterProperties(); 472 473 // ARM does not have f32 extending load. 474 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); 475 476 // ARM does not have i1 sign extending load. 477 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 478 479 // ARM supports all 4 flavors of integer indexed load / store. 480 if (!Subtarget->isThumb1Only()) { 481 for (unsigned im = (unsigned)ISD::PRE_INC; 482 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 483 setIndexedLoadAction(im, MVT::i1, Legal); 484 setIndexedLoadAction(im, MVT::i8, Legal); 485 setIndexedLoadAction(im, MVT::i16, Legal); 486 setIndexedLoadAction(im, MVT::i32, Legal); 487 setIndexedStoreAction(im, MVT::i1, Legal); 488 setIndexedStoreAction(im, MVT::i8, Legal); 489 setIndexedStoreAction(im, MVT::i16, Legal); 490 setIndexedStoreAction(im, MVT::i32, Legal); 491 } 492 } 493 494 // i64 operation support. 
495 if (Subtarget->isThumb1Only()) { 496 setOperationAction(ISD::MUL, MVT::i64, Expand); 497 setOperationAction(ISD::MULHU, MVT::i32, Expand); 498 setOperationAction(ISD::MULHS, MVT::i32, Expand); 499 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 500 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 501 } else { 502 setOperationAction(ISD::MUL, MVT::i64, Expand); 503 setOperationAction(ISD::MULHU, MVT::i32, Expand); 504 if (!Subtarget->hasV6Ops()) 505 setOperationAction(ISD::MULHS, MVT::i32, Expand); 506 } 507 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 508 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 509 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 510 setOperationAction(ISD::SRL, MVT::i64, Custom); 511 setOperationAction(ISD::SRA, MVT::i64, Custom); 512 513 // ARM does not have ROTL. 514 setOperationAction(ISD::ROTL, MVT::i32, Expand); 515 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 516 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 517 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) 518 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 519 520 // Only ARMv6 has BSWAP. 521 if (!Subtarget->hasV6Ops()) 522 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 523 524 // These are expanded into libcalls. 525 if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) { 526 // v7M has a hardware divider 527 setOperationAction(ISD::SDIV, MVT::i32, Expand); 528 setOperationAction(ISD::UDIV, MVT::i32, Expand); 529 } 530 setOperationAction(ISD::SREM, MVT::i32, Expand); 531 setOperationAction(ISD::UREM, MVT::i32, Expand); 532 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 533 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 534 535 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 536 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 537 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); 538 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 539 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 540 541 setOperationAction(ISD::TRAP, MVT::Other, Legal); 542 543 // Use the default implementation. 544 setOperationAction(ISD::VASTART, MVT::Other, Custom); 545 setOperationAction(ISD::VAARG, MVT::Other, Expand); 546 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 547 setOperationAction(ISD::VAEND, MVT::Other, Expand); 548 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 549 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 550 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 551 // FIXME: Shouldn't need this, since no register is used, but the legalizer 552 // doesn't yet know how to not do that for SjLj. 553 setExceptionSelectorRegister(ARM::R0); 554 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 555 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 556 // the default expansion. 557 if (Subtarget->hasDataBarrier() || 558 (Subtarget->hasV6Ops() && !Subtarget->isThumb())) { 559 // membarrier needs custom lowering; the rest are legal and handled 560 // normally. 561 setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); 562 } else { 563 // Set them all for expansion, which will force libcalls. 
564 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); 565 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand); 566 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand); 567 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 568 setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand); 569 setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand); 570 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 571 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand); 572 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand); 573 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 574 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand); 575 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand); 576 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 577 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand); 578 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand); 579 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 580 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand); 581 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand); 582 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 583 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand); 584 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand); 585 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 586 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand); 587 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand); 588 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 589 // Since the libcalls include locking, fold in the fences 590 setShouldFoldAtomicFences(true); 591 } 592 // 64-bit versions are always libcalls (for now) 593 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand); 594 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand); 595 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand); 596 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand); 597 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand); 598 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand); 599 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand); 600 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand); 601 602 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 603 604 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 605 if (!Subtarget->hasV6Ops()) { 606 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 607 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 608 } 609 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 610 611 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 612 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 613 // iff target supports vfp2. 614 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 615 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 616 } 617 618 // We want to custom lower some of our intrinsics. 
619 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 620 if (Subtarget->isTargetDarwin()) { 621 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 622 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 623 setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom); 624 } 625 626 setOperationAction(ISD::SETCC, MVT::i32, Expand); 627 setOperationAction(ISD::SETCC, MVT::f32, Expand); 628 setOperationAction(ISD::SETCC, MVT::f64, Expand); 629 setOperationAction(ISD::SELECT, MVT::i32, Custom); 630 setOperationAction(ISD::SELECT, MVT::f32, Custom); 631 setOperationAction(ISD::SELECT, MVT::f64, Custom); 632 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 633 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 634 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 635 636 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 637 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 638 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 639 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 640 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 641 642 // We don't support sin/cos/fmod/copysign/pow 643 setOperationAction(ISD::FSIN, MVT::f64, Expand); 644 setOperationAction(ISD::FSIN, MVT::f32, Expand); 645 setOperationAction(ISD::FCOS, MVT::f32, Expand); 646 setOperationAction(ISD::FCOS, MVT::f64, Expand); 647 setOperationAction(ISD::FREM, MVT::f64, Expand); 648 setOperationAction(ISD::FREM, MVT::f32, Expand); 649 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 650 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 651 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 652 } 653 setOperationAction(ISD::FPOW, MVT::f64, Expand); 654 setOperationAction(ISD::FPOW, MVT::f32, Expand); 655 656 // Various VFP goodness 657 if (!UseSoftFloat && !Subtarget->isThumb1Only()) { 658 // int <-> fp are custom expanded into bit_convert + ARMISD ops. 659 if (Subtarget->hasVFP2()) { 660 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 661 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 662 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 663 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 664 } 665 // Special handling for half-precision FP. 666 if (!Subtarget->hasFP16()) { 667 setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand); 668 setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand); 669 } 670 } 671 672 // We have target-specific dag combine patterns for the following nodes: 673 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 674 setTargetDAGCombine(ISD::ADD); 675 setTargetDAGCombine(ISD::SUB); 676 setTargetDAGCombine(ISD::MUL); 677 678 if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) 679 setTargetDAGCombine(ISD::OR); 680 if (Subtarget->hasNEON()) 681 setTargetDAGCombine(ISD::AND); 682 683 setStackPointerRegisterToSaveRestore(ARM::SP); 684 685 if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2()) 686 setSchedulingPreference(Sched::RegPressure); 687 else 688 setSchedulingPreference(Sched::Hybrid); 689 690 //// temporary - rewrite interface to use type 691 maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1; 692 693 // On ARM arguments smaller than 4 bytes are extended, so all arguments 694 // are at least 4 bytes aligned. 
695 setMinStackArgumentAlignment(4); 696 697 benefitFromCodePlacementOpt = true; 698} 699 700std::pair<const TargetRegisterClass*, uint8_t> 701ARMTargetLowering::findRepresentativeClass(EVT VT) const{ 702 const TargetRegisterClass *RRC = 0; 703 uint8_t Cost = 1; 704 switch (VT.getSimpleVT().SimpleTy) { 705 default: 706 return TargetLowering::findRepresentativeClass(VT); 707 // Use DPR as representative register class for all floating point 708 // and vector types. Since there are 32 SPR registers and 32 DPR registers so 709 // the cost is 1 for both f32 and f64. 710 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: 711 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: 712 RRC = ARM::DPRRegisterClass; 713 break; 714 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 715 case MVT::v4f32: case MVT::v2f64: 716 RRC = ARM::DPRRegisterClass; 717 Cost = 2; 718 break; 719 case MVT::v4i64: 720 RRC = ARM::DPRRegisterClass; 721 Cost = 4; 722 break; 723 case MVT::v8i64: 724 RRC = ARM::DPRRegisterClass; 725 Cost = 8; 726 break; 727 } 728 return std::make_pair(RRC, Cost); 729} 730 731const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 732 switch (Opcode) { 733 default: return 0; 734 case ARMISD::Wrapper: return "ARMISD::Wrapper"; 735 case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; 736 case ARMISD::CALL: return "ARMISD::CALL"; 737 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED"; 738 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; 739 case ARMISD::tCALL: return "ARMISD::tCALL"; 740 case ARMISD::BRCOND: return "ARMISD::BRCOND"; 741 case ARMISD::BR_JT: return "ARMISD::BR_JT"; 742 case ARMISD::BR2_JT: return "ARMISD::BR2_JT"; 743 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; 744 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; 745 case ARMISD::CMP: return "ARMISD::CMP"; 746 case ARMISD::CMPZ: return "ARMISD::CMPZ"; 747 case ARMISD::CMPFP: return "ARMISD::CMPFP"; 748 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; 749 case ARMISD::BCC_i64: return "ARMISD::BCC_i64"; 750 case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; 751 case ARMISD::CMOV: return "ARMISD::CMOV"; 752 case ARMISD::CNEG: return "ARMISD::CNEG"; 753 754 case ARMISD::RBIT: return "ARMISD::RBIT"; 755 756 case ARMISD::FTOSI: return "ARMISD::FTOSI"; 757 case ARMISD::FTOUI: return "ARMISD::FTOUI"; 758 case ARMISD::SITOF: return "ARMISD::SITOF"; 759 case ARMISD::UITOF: return "ARMISD::UITOF"; 760 761 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; 762 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; 763 case ARMISD::RRX: return "ARMISD::RRX"; 764 765 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; 766 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; 767 768 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; 769 case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP"; 770 case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP"; 771 772 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; 773 774 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER"; 775 776 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; 777 778 case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER"; 779 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; 780 781 case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; 782 783 case ARMISD::VCEQ: return "ARMISD::VCEQ"; 784 case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; 785 case ARMISD::VCGE: return "ARMISD::VCGE"; 786 case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; 787 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 788 case ARMISD::VCGEU: 
return "ARMISD::VCGEU"; 789 case ARMISD::VCGT: return "ARMISD::VCGT"; 790 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 791 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 792 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 793 case ARMISD::VTST: return "ARMISD::VTST"; 794 795 case ARMISD::VSHL: return "ARMISD::VSHL"; 796 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 797 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 798 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 799 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 800 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 801 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 802 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 803 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 804 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 805 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 806 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 807 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 808 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 809 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 810 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 811 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 812 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 813 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 814 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 815 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 816 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 817 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 818 case ARMISD::VDUP: return "ARMISD::VDUP"; 819 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 820 case ARMISD::VEXT: return "ARMISD::VEXT"; 821 case ARMISD::VREV64: return "ARMISD::VREV64"; 822 case ARMISD::VREV32: return "ARMISD::VREV32"; 823 case ARMISD::VREV16: return "ARMISD::VREV16"; 824 case ARMISD::VZIP: return "ARMISD::VZIP"; 825 case ARMISD::VUZP: return "ARMISD::VUZP"; 826 case ARMISD::VTRN: return "ARMISD::VTRN"; 827 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 828 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 829 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 830 case ARMISD::FMAX: return "ARMISD::FMAX"; 831 case ARMISD::FMIN: return "ARMISD::FMIN"; 832 case ARMISD::BFI: return "ARMISD::BFI"; 833 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 834 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 835 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 836 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 837 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 838 } 839} 840 841/// getRegClassFor - Return the register class that should be used for the 842/// specified value type. 843TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 844 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 845 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 846 // load / store 4 to 8 consecutive D registers. 847 if (Subtarget->hasNEON()) { 848 if (VT == MVT::v4i64) 849 return ARM::QQPRRegisterClass; 850 else if (VT == MVT::v8i64) 851 return ARM::QQQQPRRegisterClass; 852 } 853 return TargetLowering::getRegClassFor(VT); 854} 855 856// Create a fast isel object. 857FastISel * 858ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 859 return ARM::createFastISel(funcInfo); 860} 861 862/// getFunctionAlignment - Return the Log2 alignment of this function. 863unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const { 864 return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 
1 : 2; 865} 866 867/// getMaximalGlobalOffset - Returns the maximal possible offset which can 868/// be used for loads / stores from the global. 869unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 870 return (Subtarget->isThumb1Only() ? 127 : 4095); 871} 872 873Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { 874 unsigned NumVals = N->getNumValues(); 875 if (!NumVals) 876 return Sched::RegPressure; 877 878 for (unsigned i = 0; i != NumVals; ++i) { 879 EVT VT = N->getValueType(i); 880 if (VT == MVT::Glue || VT == MVT::Other) 881 continue; 882 if (VT.isFloatingPoint() || VT.isVector()) 883 return Sched::Latency; 884 } 885 886 if (!N->isMachineOpcode()) 887 return Sched::RegPressure; 888 889 // Load are scheduled for latency even if there instruction itinerary 890 // is not available. 891 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 892 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode()); 893 894 if (TID.getNumDefs() == 0) 895 return Sched::RegPressure; 896 if (!Itins->isEmpty() && 897 Itins->getOperandCycle(TID.getSchedClass(), 0) > 2) 898 return Sched::Latency; 899 900 return Sched::RegPressure; 901} 902 903unsigned 904ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC, 905 MachineFunction &MF) const { 906 const TargetFrameInfo *TFI = MF.getTarget().getFrameInfo(); 907 908 switch (RC->getID()) { 909 default: 910 return 0; 911 case ARM::tGPRRegClassID: 912 return TFI->hasFP(MF) ? 4 : 5; 913 case ARM::GPRRegClassID: { 914 unsigned FP = TFI->hasFP(MF) ? 1 : 0; 915 return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0); 916 } 917 case ARM::SPRRegClassID: // Currently not used as 'rep' register class. 918 case ARM::DPRRegClassID: 919 return 32 - 10; 920 } 921} 922 923//===----------------------------------------------------------------------===// 924// Lowering Code 925//===----------------------------------------------------------------------===// 926 927/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 928static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 929 switch (CC) { 930 default: llvm_unreachable("Unknown condition code!"); 931 case ISD::SETNE: return ARMCC::NE; 932 case ISD::SETEQ: return ARMCC::EQ; 933 case ISD::SETGT: return ARMCC::GT; 934 case ISD::SETGE: return ARMCC::GE; 935 case ISD::SETLT: return ARMCC::LT; 936 case ISD::SETLE: return ARMCC::LE; 937 case ISD::SETUGT: return ARMCC::HI; 938 case ISD::SETUGE: return ARMCC::HS; 939 case ISD::SETULT: return ARMCC::LO; 940 case ISD::SETULE: return ARMCC::LS; 941 } 942} 943 944/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
945static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 946 ARMCC::CondCodes &CondCode2) { 947 CondCode2 = ARMCC::AL; 948 switch (CC) { 949 default: llvm_unreachable("Unknown FP condition!"); 950 case ISD::SETEQ: 951 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 952 case ISD::SETGT: 953 case ISD::SETOGT: CondCode = ARMCC::GT; break; 954 case ISD::SETGE: 955 case ISD::SETOGE: CondCode = ARMCC::GE; break; 956 case ISD::SETOLT: CondCode = ARMCC::MI; break; 957 case ISD::SETOLE: CondCode = ARMCC::LS; break; 958 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 959 case ISD::SETO: CondCode = ARMCC::VC; break; 960 case ISD::SETUO: CondCode = ARMCC::VS; break; 961 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 962 case ISD::SETUGT: CondCode = ARMCC::HI; break; 963 case ISD::SETUGE: CondCode = ARMCC::PL; break; 964 case ISD::SETLT: 965 case ISD::SETULT: CondCode = ARMCC::LT; break; 966 case ISD::SETLE: 967 case ISD::SETULE: CondCode = ARMCC::LE; break; 968 case ISD::SETNE: 969 case ISD::SETUNE: CondCode = ARMCC::NE; break; 970 } 971} 972 973//===----------------------------------------------------------------------===// 974// Calling Convention Implementation 975//===----------------------------------------------------------------------===// 976 977#include "ARMGenCallingConv.inc" 978 979/// CCAssignFnForNode - Selects the correct CCAssignFn for a the 980/// given CallingConvention value. 981CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 982 bool Return, 983 bool isVarArg) const { 984 switch (CC) { 985 default: 986 llvm_unreachable("Unsupported calling convention"); 987 case CallingConv::Fast: 988 if (Subtarget->hasVFP2() && !isVarArg) { 989 if (!Subtarget->isAAPCS_ABI()) 990 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 991 // For AAPCS ABI targets, just use VFP variant of the calling convention. 992 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 993 } 994 // Fallthrough 995 case CallingConv::C: { 996 // Use target triple & subtarget features to do actual dispatch. 997 if (!Subtarget->isAAPCS_ABI()) 998 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 999 else if (Subtarget->hasVFP2() && 1000 FloatABIType == FloatABI::Hard && !isVarArg) 1001 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1002 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1003 } 1004 case CallingConv::ARM_AAPCS_VFP: 1005 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1006 case CallingConv::ARM_AAPCS: 1007 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1008 case CallingConv::ARM_APCS: 1009 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1010 } 1011} 1012 1013/// LowerCallResult - Lower the result values of a call into the 1014/// appropriate copies out of appropriate physical registers. 1015SDValue 1016ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1017 CallingConv::ID CallConv, bool isVarArg, 1018 const SmallVectorImpl<ISD::InputArg> &Ins, 1019 DebugLoc dl, SelectionDAG &DAG, 1020 SmallVectorImpl<SDValue> &InVals) const { 1021 1022 // Assign locations to each value returned by this call. 1023 SmallVector<CCValAssign, 16> RVLocs; 1024 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 1025 RVLocs, *DAG.getContext()); 1026 CCInfo.AnalyzeCallResult(Ins, 1027 CCAssignFnForNode(CallConv, /* Return*/ true, 1028 isVarArg)); 1029 1030 // Copy all of the result registers out of their specified physreg. 
1031 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1032 CCValAssign VA = RVLocs[i]; 1033 1034 SDValue Val; 1035 if (VA.needsCustom()) { 1036 // Handle f64 or half of a v2f64. 1037 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1038 InFlag); 1039 Chain = Lo.getValue(1); 1040 InFlag = Lo.getValue(2); 1041 VA = RVLocs[++i]; // skip ahead to next loc 1042 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1043 InFlag); 1044 Chain = Hi.getValue(1); 1045 InFlag = Hi.getValue(2); 1046 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1047 1048 if (VA.getLocVT() == MVT::v2f64) { 1049 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 1050 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1051 DAG.getConstant(0, MVT::i32)); 1052 1053 VA = RVLocs[++i]; // skip ahead to next loc 1054 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1055 Chain = Lo.getValue(1); 1056 InFlag = Lo.getValue(2); 1057 VA = RVLocs[++i]; // skip ahead to next loc 1058 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1059 Chain = Hi.getValue(1); 1060 InFlag = Hi.getValue(2); 1061 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1062 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1063 DAG.getConstant(1, MVT::i32)); 1064 } 1065 } else { 1066 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1067 InFlag); 1068 Chain = Val.getValue(1); 1069 InFlag = Val.getValue(2); 1070 } 1071 1072 switch (VA.getLocInfo()) { 1073 default: llvm_unreachable("Unknown loc info!"); 1074 case CCValAssign::Full: break; 1075 case CCValAssign::BCvt: 1076 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1077 break; 1078 } 1079 1080 InVals.push_back(Val); 1081 } 1082 1083 return Chain; 1084} 1085 1086/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1087/// by "Src" to address "Dst" of size "Size". Alignment information is 1088/// specified by the specific parameter attribute. The copy will be passed as 1089/// a byval function parameter. 1090/// Sometimes what we are copying is the end of a larger object, the part that 1091/// does not fit in registers. 1092static SDValue 1093CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1094 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1095 DebugLoc dl) { 1096 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1097 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1098 /*isVolatile=*/false, /*AlwaysInline=*/false, 1099 MachinePointerInfo(0), MachinePointerInfo(0)); 1100} 1101 1102/// LowerMemOpCallTo - Store the argument to the stack. 
1103SDValue 1104ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, 1105 SDValue StackPtr, SDValue Arg, 1106 DebugLoc dl, SelectionDAG &DAG, 1107 const CCValAssign &VA, 1108 ISD::ArgFlagsTy Flags) const { 1109 unsigned LocMemOffset = VA.getLocMemOffset(); 1110 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1111 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1112 if (Flags.isByVal()) 1113 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 1114 1115 return DAG.getStore(Chain, dl, Arg, PtrOff, 1116 MachinePointerInfo::getStack(LocMemOffset), 1117 false, false, 0); 1118} 1119 1120void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, 1121 SDValue Chain, SDValue &Arg, 1122 RegsToPassVector &RegsToPass, 1123 CCValAssign &VA, CCValAssign &NextVA, 1124 SDValue &StackPtr, 1125 SmallVector<SDValue, 8> &MemOpChains, 1126 ISD::ArgFlagsTy Flags) const { 1127 1128 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1129 DAG.getVTList(MVT::i32, MVT::i32), Arg); 1130 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd)); 1131 1132 if (NextVA.isRegLoc()) 1133 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1))); 1134 else { 1135 assert(NextVA.isMemLoc()); 1136 if (StackPtr.getNode() == 0) 1137 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1138 1139 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1), 1140 dl, DAG, NextVA, 1141 Flags)); 1142 } 1143} 1144 1145/// LowerCall - Lowering a call into a callseq_start <- 1146/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter 1147/// nodes. 1148SDValue 1149ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1150 CallingConv::ID CallConv, bool isVarArg, 1151 bool &isTailCall, 1152 const SmallVectorImpl<ISD::OutputArg> &Outs, 1153 const SmallVectorImpl<SDValue> &OutVals, 1154 const SmallVectorImpl<ISD::InputArg> &Ins, 1155 DebugLoc dl, SelectionDAG &DAG, 1156 SmallVectorImpl<SDValue> &InVals) const { 1157 MachineFunction &MF = DAG.getMachineFunction(); 1158 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); 1159 bool IsSibCall = false; 1160 // Temporarily disable tail calls so things don't break. 1161 if (!EnableARMTailCalls) 1162 isTailCall = false; 1163 if (isTailCall) { 1164 // Check if it's really possible to do a tail call. 1165 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1166 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1167 Outs, OutVals, Ins, DAG); 1168 // We don't support GuaranteedTailCallOpt for ARM, only automatically 1169 // detected sibcalls. 1170 if (isTailCall) { 1171 ++NumTailCalls; 1172 IsSibCall = true; 1173 } 1174 } 1175 1176 // Analyze operands of the call, assigning locations to each operand. 1177 SmallVector<CCValAssign, 16> ArgLocs; 1178 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 1179 *DAG.getContext()); 1180 CCInfo.AnalyzeCallOperands(Outs, 1181 CCAssignFnForNode(CallConv, /* Return*/ false, 1182 isVarArg)); 1183 1184 // Get a count of how many bytes are to be pushed on the stack. 1185 unsigned NumBytes = CCInfo.getNextStackOffset(); 1186 1187 // For tail calls, memory operands are available in our caller's stack. 1188 if (IsSibCall) 1189 NumBytes = 0; 1190 1191 // Adjust the stack pointer for the new arguments... 
1192 // These operations are automatically eliminated by the prolog/epilog pass 1193 if (!IsSibCall) 1194 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1195 1196 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1197 1198 RegsToPassVector RegsToPass; 1199 SmallVector<SDValue, 8> MemOpChains; 1200 1201 // Walk the register/memloc assignments, inserting copies/loads. In the case 1202 // of tail call optimization, arguments are handled later. 1203 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1204 i != e; 1205 ++i, ++realArgIdx) { 1206 CCValAssign &VA = ArgLocs[i]; 1207 SDValue Arg = OutVals[realArgIdx]; 1208 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1209 1210 // Promote the value if needed. 1211 switch (VA.getLocInfo()) { 1212 default: llvm_unreachable("Unknown loc info!"); 1213 case CCValAssign::Full: break; 1214 case CCValAssign::SExt: 1215 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1216 break; 1217 case CCValAssign::ZExt: 1218 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1219 break; 1220 case CCValAssign::AExt: 1221 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1222 break; 1223 case CCValAssign::BCvt: 1224 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1225 break; 1226 } 1227 1228 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1229 if (VA.needsCustom()) { 1230 if (VA.getLocVT() == MVT::v2f64) { 1231 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1232 DAG.getConstant(0, MVT::i32)); 1233 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1234 DAG.getConstant(1, MVT::i32)); 1235 1236 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1237 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1238 1239 VA = ArgLocs[++i]; // skip ahead to next loc 1240 if (VA.isRegLoc()) { 1241 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1242 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1243 } else { 1244 assert(VA.isMemLoc()); 1245 1246 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1247 dl, DAG, VA, Flags)); 1248 } 1249 } else { 1250 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1251 StackPtr, MemOpChains, Flags); 1252 } 1253 } else if (VA.isRegLoc()) { 1254 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1255 } else if (!IsSibCall) { 1256 assert(VA.isMemLoc()); 1257 1258 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1259 dl, DAG, VA, Flags)); 1260 } 1261 } 1262 1263 if (!MemOpChains.empty()) 1264 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1265 &MemOpChains[0], MemOpChains.size()); 1266 1267 // Build a sequence of copy-to-reg nodes chained together with token chain 1268 // and flag operands which copy the outgoing args into the appropriate regs. 1269 SDValue InFlag; 1270 // Tail call byval lowering might overwrite argument registers so in case of 1271 // tail call optimization the copies to registers are lowered later. 1272 if (!isTailCall) 1273 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1274 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1275 RegsToPass[i].second, InFlag); 1276 InFlag = Chain.getValue(1); 1277 } 1278 1279 // For tail calls lower the arguments to the 'real' stack slot. 
1280 if (isTailCall) { 1281 // Force all the incoming stack arguments to be loaded from the stack 1282 // before any new outgoing arguments are stored to the stack, because the 1283 // outgoing stack slots may alias the incoming argument stack slots, and 1284 // the alias isn't otherwise explicit. This is slightly more conservative 1285 // than necessary, because it means that each store effectively depends 1286 // on every argument instead of just those arguments it would clobber. 1287 1288 // Do not flag preceeding copytoreg stuff together with the following stuff. 1289 InFlag = SDValue(); 1290 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1291 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1292 RegsToPass[i].second, InFlag); 1293 InFlag = Chain.getValue(1); 1294 } 1295 InFlag =SDValue(); 1296 } 1297 1298 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1299 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1300 // node so that legalize doesn't hack it. 1301 bool isDirect = false; 1302 bool isARMFunc = false; 1303 bool isLocalARMFunc = false; 1304 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1305 1306 if (EnableARMLongCalls) { 1307 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1308 && "long-calls with non-static relocation model!"); 1309 // Handle a global address or an external symbol. If it's not one of 1310 // those, the target's already in a register, so we don't need to do 1311 // anything extra. 1312 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1313 const GlobalValue *GV = G->getGlobal(); 1314 // Create a constant pool entry for the callee address 1315 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1316 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1317 ARMPCLabelIndex, 1318 ARMCP::CPValue, 0); 1319 // Get the address of the callee into a register 1320 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1321 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1322 Callee = DAG.getLoad(getPointerTy(), dl, 1323 DAG.getEntryNode(), CPAddr, 1324 MachinePointerInfo::getConstantPool(), 1325 false, false, 0); 1326 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1327 const char *Sym = S->getSymbol(); 1328 1329 // Create a constant pool entry for the callee address 1330 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1331 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1332 Sym, ARMPCLabelIndex, 0); 1333 // Get the address of the callee into a register 1334 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1335 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1336 Callee = DAG.getLoad(getPointerTy(), dl, 1337 DAG.getEntryNode(), CPAddr, 1338 MachinePointerInfo::getConstantPool(), 1339 false, false, 0); 1340 } 1341 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1342 const GlobalValue *GV = G->getGlobal(); 1343 isDirect = true; 1344 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1345 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1346 getTargetMachine().getRelocationModel() != Reloc::Static; 1347 isARMFunc = !Subtarget->isThumb() || isStub; 1348 // ARM call to a local ARM function is predicable. 1349 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1350 // tBX takes a register source operand. 
1351 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1352 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1353 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1354 ARMPCLabelIndex, 1355 ARMCP::CPValue, 4); 1356 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1357 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1358 Callee = DAG.getLoad(getPointerTy(), dl, 1359 DAG.getEntryNode(), CPAddr, 1360 MachinePointerInfo::getConstantPool(), 1361 false, false, 0); 1362 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1363 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1364 getPointerTy(), Callee, PICLabel); 1365 } else { 1366 // On ELF targets for PIC code, direct calls should go through the PLT 1367 unsigned OpFlags = 0; 1368 if (Subtarget->isTargetELF() && 1369 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1370 OpFlags = ARMII::MO_PLT; 1371 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1372 } 1373 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1374 isDirect = true; 1375 bool isStub = Subtarget->isTargetDarwin() && 1376 getTargetMachine().getRelocationModel() != Reloc::Static; 1377 isARMFunc = !Subtarget->isThumb() || isStub; 1378 // tBX takes a register source operand. 1379 const char *Sym = S->getSymbol(); 1380 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1381 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1382 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1383 Sym, ARMPCLabelIndex, 4); 1384 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1385 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1386 Callee = DAG.getLoad(getPointerTy(), dl, 1387 DAG.getEntryNode(), CPAddr, 1388 MachinePointerInfo::getConstantPool(), 1389 false, false, 0); 1390 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1391 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1392 getPointerTy(), Callee, PICLabel); 1393 } else { 1394 unsigned OpFlags = 0; 1395 // On ELF targets for PIC code, direct calls should go through the PLT 1396 if (Subtarget->isTargetELF() && 1397 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1398 OpFlags = ARMII::MO_PLT; 1399 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1400 } 1401 } 1402 1403 // FIXME: handle tail calls differently. 1404 unsigned CallOpc; 1405 if (Subtarget->isThumb()) { 1406 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1407 CallOpc = ARMISD::CALL_NOLINK; 1408 else 1409 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1410 } else { 1411 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1412 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1413 : ARMISD::CALL_NOLINK; 1414 } 1415 1416 std::vector<SDValue> Ops; 1417 Ops.push_back(Chain); 1418 Ops.push_back(Callee); 1419 1420 // Add argument registers to the end of the list so that they are known live 1421 // into the call. 1422 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1423 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1424 RegsToPass[i].second.getValueType())); 1425 1426 if (InFlag.getNode()) 1427 Ops.push_back(InFlag); 1428 1429 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1430 if (isTailCall) 1431 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1432 1433 // Returns a chain and a flag for retval copy to use. 
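  // Note: the glue result of the call is threaded through CALLSEQ_END and into
  // LowerCallResult below so that the copies out of the physical return
  // registers stay attached to the call; it is only propagated past
  // CALLSEQ_END when there are return values to copy (Ins is non-empty).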
1434 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1435 InFlag = Chain.getValue(1); 1436 1437 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1438 DAG.getIntPtrConstant(0, true), InFlag); 1439 if (!Ins.empty()) 1440 InFlag = Chain.getValue(1); 1441 1442 // Handle result values, copying them out of physregs into vregs that we 1443 // return. 1444 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1445 dl, DAG, InVals); 1446} 1447 1448/// MatchingStackOffset - Return true if the given stack call argument is 1449/// already available in the same position (relatively) of the caller's 1450/// incoming argument stack. 1451static 1452bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1453 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1454 const ARMInstrInfo *TII) { 1455 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1456 int FI = INT_MAX; 1457 if (Arg.getOpcode() == ISD::CopyFromReg) { 1458 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1459 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 1460 return false; 1461 MachineInstr *Def = MRI->getVRegDef(VR); 1462 if (!Def) 1463 return false; 1464 if (!Flags.isByVal()) { 1465 if (!TII->isLoadFromStackSlot(Def, FI)) 1466 return false; 1467 } else { 1468 return false; 1469 } 1470 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1471 if (Flags.isByVal()) 1472 // ByVal argument is passed in as a pointer but it's now being 1473 // dereferenced. e.g. 1474 // define @foo(%struct.X* %A) { 1475 // tail call @bar(%struct.X* byval %A) 1476 // } 1477 return false; 1478 SDValue Ptr = Ld->getBasePtr(); 1479 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1480 if (!FINode) 1481 return false; 1482 FI = FINode->getIndex(); 1483 } else 1484 return false; 1485 1486 assert(FI != INT_MAX); 1487 if (!MFI->isFixedObjectIndex(FI)) 1488 return false; 1489 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1490} 1491 1492/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1493/// for tail call optimization. Targets which want to do tail call 1494/// optimization should implement this function. 1495bool 1496ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1497 CallingConv::ID CalleeCC, 1498 bool isVarArg, 1499 bool isCalleeStructRet, 1500 bool isCallerStructRet, 1501 const SmallVectorImpl<ISD::OutputArg> &Outs, 1502 const SmallVectorImpl<SDValue> &OutVals, 1503 const SmallVectorImpl<ISD::InputArg> &Ins, 1504 SelectionDAG& DAG) const { 1505 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1506 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1507 bool CCMatch = CallerCC == CalleeCC; 1508 1509 // Look for obvious safe cases to perform tail call optimization that do not 1510 // require ABI changes. This is what gcc calls sibcall. 1511 1512 // Do not sibcall optimize vararg calls unless the call site is not passing 1513 // any arguments. 1514 if (isVarArg && !Outs.empty()) 1515 return false; 1516 1517 // Also avoid sibcall optimization if either caller or callee uses struct 1518 // return semantics. 1519 if (isCalleeStructRet || isCallerStructRet) 1520 return false; 1521 1522 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1523 // emitEpilogue is not ready for them. 1524 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1525 // LR. 
This means if we need to reload LR, it takes an extra instruction,
1526   // which outweighs the value of the tail call; but here we don't know yet
1527   // whether LR is going to be used. Probably the right approach is to
1528   // generate the tail call here and turn it back into CALL/RET in
1529   // emitEpilogue if LR is used.
1530
1531   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1532   // but we need to make sure there are enough registers; the only valid
1533   // registers are the 4 used for parameters. We don't currently do this
1534   // case.
1535   if (Subtarget->isThumb1Only())
1536     return false;
1537
1538   // If the calling conventions do not match, then we'd better make sure the
1539   // results are returned in the same way as what the caller expects.
1540   if (!CCMatch) {
1541     SmallVector<CCValAssign, 16> RVLocs1;
1542     CCState CCInfo1(CalleeCC, false, getTargetMachine(),
1543                     RVLocs1, *DAG.getContext());
1544     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1545
1546     SmallVector<CCValAssign, 16> RVLocs2;
1547     CCState CCInfo2(CallerCC, false, getTargetMachine(),
1548                     RVLocs2, *DAG.getContext());
1549     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1550
1551     if (RVLocs1.size() != RVLocs2.size())
1552       return false;
1553     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1554       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1555         return false;
1556       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1557         return false;
1558       if (RVLocs1[i].isRegLoc()) {
1559         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1560           return false;
1561       } else {
1562         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1563           return false;
1564       }
1565     }
1566   }
1567
1568   // If the callee takes no arguments then go on to check the results of the
1569   // call.
1570   if (!Outs.empty()) {
1571     // Check if stack adjustment is needed. For now, do not do this if any
1572     // argument is passed on the stack.
1573     SmallVector<CCValAssign, 16> ArgLocs;
1574     CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
1575                    ArgLocs, *DAG.getContext());
1576     CCInfo.AnalyzeCallOperands(Outs,
1577                                CCAssignFnForNode(CalleeCC, false, isVarArg));
1578     if (CCInfo.getNextStackOffset()) {
1579       MachineFunction &MF = DAG.getMachineFunction();
1580
1581       // Check if the arguments are already laid out in the right way as
1582       // the caller's fixed stack objects.
1583       MachineFrameInfo *MFI = MF.getFrameInfo();
1584       const MachineRegisterInfo *MRI = &MF.getRegInfo();
1585       const ARMInstrInfo *TII =
1586         ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
1587       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1588            i != e;
1589            ++i, ++realArgIdx) {
1590         CCValAssign &VA = ArgLocs[i];
1591         EVT RegVT = VA.getLocVT();
1592         SDValue Arg = OutVals[realArgIdx];
1593         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1594         if (VA.getLocInfo() == CCValAssign::Indirect)
1595           return false;
1596         if (VA.needsCustom()) {
1597           // f64 and vector types are split into multiple registers or
1598           // register/stack-slot combinations. The types will not match
1599           // the registers; give up on memory f64 refs until we figure
1600           // out what to do about this.
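          // For example, with a soft-float calling convention an f64 argument
          // occupies two i32 locations: either a GPR pair or, when only one
          // GPR remains, a register plus a stack slot (a v2f64 takes four such
          // locations). The checks below walk those extra ArgLocs entries.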
1601 if (!VA.isRegLoc()) 1602 return false; 1603 if (!ArgLocs[++i].isRegLoc()) 1604 return false; 1605 if (RegVT == MVT::v2f64) { 1606 if (!ArgLocs[++i].isRegLoc()) 1607 return false; 1608 if (!ArgLocs[++i].isRegLoc()) 1609 return false; 1610 } 1611 } else if (!VA.isRegLoc()) { 1612 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1613 MFI, MRI, TII)) 1614 return false; 1615 } 1616 } 1617 } 1618 } 1619 1620 return true; 1621} 1622 1623SDValue 1624ARMTargetLowering::LowerReturn(SDValue Chain, 1625 CallingConv::ID CallConv, bool isVarArg, 1626 const SmallVectorImpl<ISD::OutputArg> &Outs, 1627 const SmallVectorImpl<SDValue> &OutVals, 1628 DebugLoc dl, SelectionDAG &DAG) const { 1629 1630 // CCValAssign - represent the assignment of the return value to a location. 1631 SmallVector<CCValAssign, 16> RVLocs; 1632 1633 // CCState - Info about the registers and stack slots. 1634 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1635 *DAG.getContext()); 1636 1637 // Analyze outgoing return values. 1638 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1639 isVarArg)); 1640 1641 // If this is the first return lowered for this function, add 1642 // the regs to the liveout set for the function. 1643 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1644 for (unsigned i = 0; i != RVLocs.size(); ++i) 1645 if (RVLocs[i].isRegLoc()) 1646 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1647 } 1648 1649 SDValue Flag; 1650 1651 // Copy the result values into the output registers. 1652 for (unsigned i = 0, realRVLocIdx = 0; 1653 i != RVLocs.size(); 1654 ++i, ++realRVLocIdx) { 1655 CCValAssign &VA = RVLocs[i]; 1656 assert(VA.isRegLoc() && "Can only return in registers!"); 1657 1658 SDValue Arg = OutVals[realRVLocIdx]; 1659 1660 switch (VA.getLocInfo()) { 1661 default: llvm_unreachable("Unknown loc info!"); 1662 case CCValAssign::Full: break; 1663 case CCValAssign::BCvt: 1664 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1665 break; 1666 } 1667 1668 if (VA.needsCustom()) { 1669 if (VA.getLocVT() == MVT::v2f64) { 1670 // Extract the first half and return it in two registers. 1671 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1672 DAG.getConstant(0, MVT::i32)); 1673 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1674 DAG.getVTList(MVT::i32, MVT::i32), Half); 1675 1676 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1677 Flag = Chain.getValue(1); 1678 VA = RVLocs[++i]; // skip ahead to next loc 1679 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1680 HalfGPRs.getValue(1), Flag); 1681 Flag = Chain.getValue(1); 1682 VA = RVLocs[++i]; // skip ahead to next loc 1683 1684 // Extract the 2nd half and fall through to handle it as an f64 value. 1685 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1686 DAG.getConstant(1, MVT::i32)); 1687 } 1688 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1689 // available. 1690 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1691 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1692 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1693 Flag = Chain.getValue(1); 1694 VA = RVLocs[++i]; // skip ahead to next loc 1695 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1696 Flag); 1697 } else 1698 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1699 1700 // Guarantee that all emitted copies are 1701 // stuck together, avoiding something bad. 
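      // (The glue value ties each CopyToReg to the next one and ultimately to
      // the RET_FLAG node, so the scheduler cannot interleave other nodes
      // between the return-value copies and the return itself.)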
1702 Flag = Chain.getValue(1); 1703 } 1704 1705 SDValue result; 1706 if (Flag.getNode()) 1707 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1708 else // Return Void 1709 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1710 1711 return result; 1712} 1713 1714bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1715 if (N->getNumValues() != 1) 1716 return false; 1717 if (!N->hasNUsesOfValue(1, 0)) 1718 return false; 1719 1720 unsigned NumCopies = 0; 1721 SDNode* Copies[2]; 1722 SDNode *Use = *N->use_begin(); 1723 if (Use->getOpcode() == ISD::CopyToReg) { 1724 Copies[NumCopies++] = Use; 1725 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1726 // f64 returned in a pair of GPRs. 1727 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1728 UI != UE; ++UI) { 1729 if (UI->getOpcode() != ISD::CopyToReg) 1730 return false; 1731 Copies[UI.getUse().getResNo()] = *UI; 1732 ++NumCopies; 1733 } 1734 } else if (Use->getOpcode() == ISD::BITCAST) { 1735 // f32 returned in a single GPR. 1736 if (!Use->hasNUsesOfValue(1, 0)) 1737 return false; 1738 Use = *Use->use_begin(); 1739 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1740 return false; 1741 Copies[NumCopies++] = Use; 1742 } else { 1743 return false; 1744 } 1745 1746 if (NumCopies != 1 && NumCopies != 2) 1747 return false; 1748 1749 bool HasRet = false; 1750 for (unsigned i = 0; i < NumCopies; ++i) { 1751 SDNode *Copy = Copies[i]; 1752 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1753 UI != UE; ++UI) { 1754 if (UI->getOpcode() == ISD::CopyToReg) { 1755 SDNode *Use = *UI; 1756 if (Use == Copies[0] || Use == Copies[1]) 1757 continue; 1758 return false; 1759 } 1760 if (UI->getOpcode() != ARMISD::RET_FLAG) 1761 return false; 1762 HasRet = true; 1763 } 1764 } 1765 1766 return HasRet; 1767} 1768 1769// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1770// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1771// one of the above mentioned nodes. It has to be wrapped because otherwise 1772// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1773// be used to form addressing mode. These wrapped nodes will be selected 1774// into MOVi. 
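// For example, lowering a constant-pool reference produces roughly
//   (ARMISD::Wrapper (TargetConstantPool <cp>))
// and instruction selection can then either fold the wrapped address into a
// load/store addressing mode or materialize it with the MOVi-style patterns
// mentioned above.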
1775static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1776 EVT PtrVT = Op.getValueType(); 1777 // FIXME there is no actual debug info here 1778 DebugLoc dl = Op.getDebugLoc(); 1779 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1780 SDValue Res; 1781 if (CP->isMachineConstantPoolEntry()) 1782 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1783 CP->getAlignment()); 1784 else 1785 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1786 CP->getAlignment()); 1787 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1788} 1789 1790unsigned ARMTargetLowering::getJumpTableEncoding() const { 1791 return MachineJumpTableInfo::EK_Inline; 1792} 1793 1794SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1795 SelectionDAG &DAG) const { 1796 MachineFunction &MF = DAG.getMachineFunction(); 1797 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1798 unsigned ARMPCLabelIndex = 0; 1799 DebugLoc DL = Op.getDebugLoc(); 1800 EVT PtrVT = getPointerTy(); 1801 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1802 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1803 SDValue CPAddr; 1804 if (RelocM == Reloc::Static) { 1805 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1806 } else { 1807 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 1808 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1809 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1810 ARMCP::CPBlockAddress, 1811 PCAdj); 1812 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1813 } 1814 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1815 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1816 MachinePointerInfo::getConstantPool(), 1817 false, false, 0); 1818 if (RelocM == Reloc::Static) 1819 return Result; 1820 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1821 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1822} 1823 1824// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1825SDValue 1826ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1827 SelectionDAG &DAG) const { 1828 DebugLoc dl = GA->getDebugLoc(); 1829 EVT PtrVT = getPointerTy(); 1830 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1831 MachineFunction &MF = DAG.getMachineFunction(); 1832 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1833 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1834 ARMConstantPoolValue *CPV = 1835 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1836 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1837 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1838 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1839 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1840 MachinePointerInfo::getConstantPool(), 1841 false, false, 0); 1842 SDValue Chain = Argument.getValue(1); 1843 1844 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1845 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1846 1847 // call __tls_get_addr. 1848 ArgListTy Args; 1849 ArgListEntry Entry; 1850 Entry.Node = Argument; 1851 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1852 Args.push_back(Entry); 1853 // FIXME: is there useful debug info available here? 
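  // The general-dynamic access is completed by an ordinary C call to
  // __tls_get_addr, passing the PIC-adjusted address of the TLSGD constant
  // pool entry as its single i32 argument; the call returns the address of
  // the thread-local variable.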
1854 std::pair<SDValue, SDValue> CallResult = 1855 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1856 false, false, false, false, 1857 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1858 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1859 return CallResult.first; 1860} 1861 1862// Lower ISD::GlobalTLSAddress using the "initial exec" or 1863// "local exec" model. 1864SDValue 1865ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1866 SelectionDAG &DAG) const { 1867 const GlobalValue *GV = GA->getGlobal(); 1868 DebugLoc dl = GA->getDebugLoc(); 1869 SDValue Offset; 1870 SDValue Chain = DAG.getEntryNode(); 1871 EVT PtrVT = getPointerTy(); 1872 // Get the Thread Pointer 1873 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1874 1875 if (GV->isDeclaration()) { 1876 MachineFunction &MF = DAG.getMachineFunction(); 1877 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1878 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1879 // Initial exec model. 1880 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1881 ARMConstantPoolValue *CPV = 1882 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1883 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1884 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1885 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1886 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1887 MachinePointerInfo::getConstantPool(), 1888 false, false, 0); 1889 Chain = Offset.getValue(1); 1890 1891 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1892 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1893 1894 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1895 MachinePointerInfo::getConstantPool(), 1896 false, false, 0); 1897 } else { 1898 // local exec model 1899 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 1900 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1901 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1902 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1903 MachinePointerInfo::getConstantPool(), 1904 false, false, 0); 1905 } 1906 1907 // The address of the thread local variable is the add of the thread 1908 // pointer with the offset of the variable. 1909 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1910} 1911 1912SDValue 1913ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1914 // TODO: implement the "local dynamic" model 1915 assert(Subtarget->isTargetELF() && 1916 "TLS not implemented for non-ELF targets"); 1917 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1918 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1919 // otherwise use the "Local Exec" TLS Model 1920 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1921 return LowerToTLSGeneralDynamicModel(GA, DAG); 1922 else 1923 return LowerToTLSExecModels(GA, DAG); 1924} 1925 1926SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1927 SelectionDAG &DAG) const { 1928 EVT PtrVT = getPointerTy(); 1929 DebugLoc dl = Op.getDebugLoc(); 1930 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1931 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1932 if (RelocM == Reloc::PIC_) { 1933 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1934 ARMConstantPoolValue *CPV = 1935 new ARMConstantPoolValue(GV, UseGOTOFF ? 
ARMCP::GOTOFF : ARMCP::GOT); 1936 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1937 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1938 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1939 CPAddr, 1940 MachinePointerInfo::getConstantPool(), 1941 false, false, 0); 1942 SDValue Chain = Result.getValue(1); 1943 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1944 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1945 if (!UseGOTOFF) 1946 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1947 MachinePointerInfo::getGOT(), false, false, 0); 1948 return Result; 1949 } else { 1950 // If we have T2 ops, we can materialize the address directly via movt/movw 1951 // pair. This is always cheaper. 1952 if (Subtarget->useMovt()) { 1953 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1954 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1955 } else { 1956 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1957 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1958 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1959 MachinePointerInfo::getConstantPool(), 1960 false, false, 0); 1961 } 1962 } 1963} 1964 1965SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1966 SelectionDAG &DAG) const { 1967 MachineFunction &MF = DAG.getMachineFunction(); 1968 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1969 unsigned ARMPCLabelIndex = 0; 1970 EVT PtrVT = getPointerTy(); 1971 DebugLoc dl = Op.getDebugLoc(); 1972 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1973 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1974 SDValue CPAddr; 1975 if (RelocM == Reloc::Static) 1976 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1977 else { 1978 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1979 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1980 ARMConstantPoolValue *CPV = 1981 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1982 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1983 } 1984 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1985 1986 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1987 MachinePointerInfo::getConstantPool(), 1988 false, false, 0); 1989 SDValue Chain = Result.getValue(1); 1990 1991 if (RelocM == Reloc::PIC_) { 1992 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1993 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1994 } 1995 1996 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1997 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 1998 false, false, 0); 1999 2000 return Result; 2001} 2002 2003SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2004 SelectionDAG &DAG) const { 2005 assert(Subtarget->isTargetELF() && 2006 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2007 MachineFunction &MF = DAG.getMachineFunction(); 2008 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2009 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2010 EVT PtrVT = getPointerTy(); 2011 DebugLoc dl = Op.getDebugLoc(); 2012 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2013 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2014 "_GLOBAL_OFFSET_TABLE_", 2015 ARMPCLabelIndex, PCAdj); 2016 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2017 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2018 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2019 MachinePointerInfo::getConstantPool(), 2020 false, false, 0); 2021 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2022 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2023} 2024 2025SDValue 2026ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2027 const { 2028 DebugLoc dl = Op.getDebugLoc(); 2029 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2030 Op.getOperand(0), Op.getOperand(1)); 2031} 2032 2033SDValue 2034ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2035 DebugLoc dl = Op.getDebugLoc(); 2036 SDValue Val = DAG.getConstant(0, MVT::i32); 2037 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2038 Op.getOperand(1), Val); 2039} 2040 2041SDValue 2042ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2043 DebugLoc dl = Op.getDebugLoc(); 2044 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2045 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2046} 2047 2048SDValue 2049ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2050 const ARMSubtarget *Subtarget) const { 2051 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2052 DebugLoc dl = Op.getDebugLoc(); 2053 switch (IntNo) { 2054 default: return SDValue(); // Don't custom lower most intrinsics. 2055 case Intrinsic::arm_thread_pointer: { 2056 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2057 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2058 } 2059 case Intrinsic::eh_sjlj_lsda: { 2060 MachineFunction &MF = DAG.getMachineFunction(); 2061 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2062 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2063 EVT PtrVT = getPointerTy(); 2064 DebugLoc dl = Op.getDebugLoc(); 2065 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2066 SDValue CPAddr; 2067 unsigned PCAdj = (RelocM != Reloc::PIC_) 2068 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2069 ARMConstantPoolValue *CPV = 2070 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2071 ARMCP::CPLSDA, PCAdj); 2072 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2073 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2074 SDValue Result = 2075 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2076 MachinePointerInfo::getConstantPool(), 2077 false, false, 0); 2078 2079 if (RelocM == Reloc::PIC_) { 2080 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2081 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2082 } 2083 return Result; 2084 } 2085 } 2086} 2087 2088static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2089 const ARMSubtarget *Subtarget) { 2090 DebugLoc dl = Op.getDebugLoc(); 2091 if (!Subtarget->hasDataBarrier()) { 2092 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2093 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2094 // here. 2095 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2096 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!");
2097     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2098                        DAG.getConstant(0, MVT::i32));
2099   }
2100
2101   SDValue Op5 = Op.getOperand(5);
2102   bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
2103   unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2104   unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2105   bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);
2106
2107   ARM_MB::MemBOpt DMBOpt;
2108   if (isDeviceBarrier)
2109     DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
2110   else
2111     DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
2112   return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
2113                      DAG.getConstant(DMBOpt, MVT::i32));
2114 }
2115
2116 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2117                              const ARMSubtarget *Subtarget) {
2118   // ARM pre-v5TE and Thumb1 do not have preload instructions.
2119   if (!(Subtarget->isThumb2() ||
2120         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2121     // Just preserve the chain.
2122     return Op.getOperand(0);
2123
2124   DebugLoc dl = Op.getDebugLoc();
2125   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
2126   if (!isRead &&
2127       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
2128     // ARMv7 with MP extension has PLDW.
2129     return Op.getOperand(0);
2130
2131   if (Subtarget->isThumb())
2132     // Invert the bits.
2133     isRead = ~isRead & 1;
2134   unsigned isData = Subtarget->isThumb() ? 0 : 1;
2135
2136   // Currently there is no intrinsic that matches pli.
2137   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2138                      Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2139                      DAG.getConstant(isData, MVT::i32));
2140 }
2141
2142 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2143   MachineFunction &MF = DAG.getMachineFunction();
2144   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2145
2146   // vastart just stores the address of the VarArgsFrameIndex slot into the
2147   // memory location argument.
2148   DebugLoc dl = Op.getDebugLoc();
2149   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2150   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2151   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2152   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2153                       MachinePointerInfo(SV), false, false, 0);
2154 }
2155
2156 SDValue
2157 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2158                                         SDValue &Root, SelectionDAG &DAG,
2159                                         DebugLoc dl) const {
2160   MachineFunction &MF = DAG.getMachineFunction();
2161   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2162
2163   TargetRegisterClass *RC;
2164   if (AFI->isThumb1OnlyFunction())
2165     RC = ARM::tGPRRegisterClass;
2166   else
2167     RC = ARM::GPRRegisterClass;
2168
2169   // Transform the arguments stored in physical registers into virtual ones.
2170   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2171   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2172
2173   SDValue ArgValue2;
2174   if (NextVA.isMemLoc()) {
2175     MachineFrameInfo *MFI = MF.getFrameInfo();
2176     int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2177
2178     // Create load node to retrieve arguments from the stack.
2179 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2180 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2181 MachinePointerInfo::getFixedStack(FI), 2182 false, false, 0); 2183 } else { 2184 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2185 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2186 } 2187 2188 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2189} 2190 2191SDValue 2192ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2193 CallingConv::ID CallConv, bool isVarArg, 2194 const SmallVectorImpl<ISD::InputArg> 2195 &Ins, 2196 DebugLoc dl, SelectionDAG &DAG, 2197 SmallVectorImpl<SDValue> &InVals) 2198 const { 2199 2200 MachineFunction &MF = DAG.getMachineFunction(); 2201 MachineFrameInfo *MFI = MF.getFrameInfo(); 2202 2203 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2204 2205 // Assign locations to all of the incoming arguments. 2206 SmallVector<CCValAssign, 16> ArgLocs; 2207 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2208 *DAG.getContext()); 2209 CCInfo.AnalyzeFormalArguments(Ins, 2210 CCAssignFnForNode(CallConv, /* Return*/ false, 2211 isVarArg)); 2212 2213 SmallVector<SDValue, 16> ArgValues; 2214 2215 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2216 CCValAssign &VA = ArgLocs[i]; 2217 2218 // Arguments stored in registers. 2219 if (VA.isRegLoc()) { 2220 EVT RegVT = VA.getLocVT(); 2221 2222 SDValue ArgValue; 2223 if (VA.needsCustom()) { 2224 // f64 and vector types are split up into multiple registers or 2225 // combinations of registers and stack slots. 2226 if (VA.getLocVT() == MVT::v2f64) { 2227 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2228 Chain, DAG, dl); 2229 VA = ArgLocs[++i]; // skip ahead to next loc 2230 SDValue ArgValue2; 2231 if (VA.isMemLoc()) { 2232 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2233 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2234 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2235 MachinePointerInfo::getFixedStack(FI), 2236 false, false, 0); 2237 } else { 2238 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2239 Chain, DAG, dl); 2240 } 2241 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2242 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2243 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2244 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2245 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2246 } else 2247 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2248 2249 } else { 2250 TargetRegisterClass *RC; 2251 2252 if (RegVT == MVT::f32) 2253 RC = ARM::SPRRegisterClass; 2254 else if (RegVT == MVT::f64) 2255 RC = ARM::DPRRegisterClass; 2256 else if (RegVT == MVT::v2f64) 2257 RC = ARM::QPRRegisterClass; 2258 else if (RegVT == MVT::i32) 2259 RC = (AFI->isThumb1OnlyFunction() ? 2260 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2261 else 2262 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2263 2264 // Transform the arguments in physical registers into virtual ones. 2265 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2266 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2267 } 2268 2269 // If this is an 8 or 16-bit value, it is really passed promoted 2270 // to 32 bits. Insert an assert[sz]ext to capture this, then 2271 // truncate to the right size. 
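      // For example, a sign-extended i8 argument arrives in the low bits of an
      // i32 register and is lowered as (truncate (AssertSext reg, i8)), so
      // later passes know the upper bits already hold the sign extension.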
2272       switch (VA.getLocInfo()) {
2273       default: llvm_unreachable("Unknown loc info!");
2274       case CCValAssign::Full: break;
2275       case CCValAssign::BCvt:
2276         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2277         break;
2278       case CCValAssign::SExt:
2279         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2280                                DAG.getValueType(VA.getValVT()));
2281         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2282         break;
2283       case CCValAssign::ZExt:
2284         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2285                                DAG.getValueType(VA.getValVT()));
2286         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2287         break;
2288       }
2289
2290       InVals.push_back(ArgValue);
2291
2292     } else { // !VA.isRegLoc()
2293
2294       // sanity check
2295       assert(VA.isMemLoc());
2296       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
2297
2298       unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
2299       int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true);
2300
2301       // Create load nodes to retrieve arguments from the stack.
2302       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2303       InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
2304                                    MachinePointerInfo::getFixedStack(FI),
2305                                    false, false, 0));
2306     }
2307   }
2308
2309   // varargs
2310   if (isVarArg) {
2311     static const unsigned GPRArgRegs[] = {
2312       ARM::R0, ARM::R1, ARM::R2, ARM::R3
2313     };
2314
2315     unsigned NumGPRs = CCInfo.getFirstUnallocated
2316       (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
2317
2318     unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
2319     unsigned VARegSize = (4 - NumGPRs) * 4;
2320     unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2321     unsigned ArgOffset = CCInfo.getNextStackOffset();
2322     if (VARegSaveSize) {
2323       // If this function is vararg, store any remaining integer argument regs
2324       // to their spots on the stack so that they may be loaded by dereferencing
2325       // the result of va_arg.
2326       AFI->setVarArgsRegSaveSize(VARegSaveSize);
2327       AFI->setVarArgsFrameIndex(
2328         MFI->CreateFixedObject(VARegSaveSize,
2329                                ArgOffset + VARegSaveSize - VARegSize,
2330                                false));
2331       SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
2332                                       getPointerTy());
2333
2334       SmallVector<SDValue, 4> MemOps;
2335       for (; NumGPRs < 4; ++NumGPRs) {
2336         TargetRegisterClass *RC;
2337         if (AFI->isThumb1OnlyFunction())
2338           RC = ARM::tGPRRegisterClass;
2339         else
2340           RC = ARM::GPRRegisterClass;
2341
2342         unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
2343         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2344         SDValue Store =
2345           DAG.getStore(Val.getValue(1), dl, Val, FIN,
2346                        MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
2347                        false, false, 0);
2348         MemOps.push_back(Store);
2349         FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2350                           DAG.getConstant(4, getPointerTy()));
2351       }
2352       if (!MemOps.empty())
2353         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2354                             &MemOps[0], MemOps.size());
2355     } else
2356       // This will point to the next argument passed via stack.
2357       AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
2358   }
2359
2360   return Chain;
2361 }
2362
2363 /// isFloatingPointZero - Return true if this is +0.0.
2364 static bool isFloatingPointZero(SDValue Op) {
2365   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
2366     return CFP->getValueAPF().isPosZero();
2367   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
2368     // Maybe this has already been legalized into the constant pool?
2369     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
2370       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
2371       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
2372         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
2373           return CFP->getValueAPF().isPosZero();
2374     }
2375   }
2376   return false;
2377 }
2378
2379 /// Returns an appropriate ARM CMP (cmp) and the corresponding condition code
2380 /// for the given operands.
2381 SDValue
2382 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
2383                              SDValue &ARMcc, SelectionDAG &DAG,
2384                              DebugLoc dl) const {
2385   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
2386     unsigned C = RHSC->getZExtValue();
2387     if (!isLegalICmpImmediate(C)) {
2388       // Constant does not fit, try adjusting it by one?
2389       switch (CC) {
2390       default: break;
2391       case ISD::SETLT:
2392       case ISD::SETGE:
2393         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
2394           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
2395           RHS = DAG.getConstant(C-1, MVT::i32);
2396         }
2397         break;
2398       case ISD::SETULT:
2399       case ISD::SETUGE:
2400         if (C != 0 && isLegalICmpImmediate(C-1)) {
2401           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
2402           RHS = DAG.getConstant(C-1, MVT::i32);
2403         }
2404         break;
2405       case ISD::SETLE:
2406       case ISD::SETGT:
2407         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
2408           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
2409           RHS = DAG.getConstant(C+1, MVT::i32);
2410         }
2411         break;
2412       case ISD::SETULE:
2413       case ISD::SETUGT:
2414         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
2415           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
2416           RHS = DAG.getConstant(C+1, MVT::i32);
2417         }
2418         break;
2419       }
2420     }
2421   }
2422
2423   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
2424   ARMISD::NodeType CompareType;
2425   switch (CondCode) {
2426   default:
2427     CompareType = ARMISD::CMP;
2428     break;
2429   case ARMCC::EQ:
2430   case ARMCC::NE:
2431     // Uses only the Z flag.
2432     CompareType = ARMISD::CMPZ;
2433     break;
2434   }
2435   ARMcc = DAG.getConstant(CondCode, MVT::i32);
2436   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
2437 }
2438
2439 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2440SDValue 2441ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2442 DebugLoc dl) const { 2443 SDValue Cmp; 2444 if (!isFloatingPointZero(RHS)) 2445 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2446 else 2447 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2448 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2449} 2450 2451SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2452 SDValue Cond = Op.getOperand(0); 2453 SDValue SelectTrue = Op.getOperand(1); 2454 SDValue SelectFalse = Op.getOperand(2); 2455 DebugLoc dl = Op.getDebugLoc(); 2456 2457 // Convert: 2458 // 2459 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2460 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2461 // 2462 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2463 const ConstantSDNode *CMOVTrue = 2464 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2465 const ConstantSDNode *CMOVFalse = 2466 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2467 2468 if (CMOVTrue && CMOVFalse) { 2469 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2470 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2471 2472 SDValue True; 2473 SDValue False; 2474 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2475 True = SelectTrue; 2476 False = SelectFalse; 2477 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2478 True = SelectFalse; 2479 False = SelectTrue; 2480 } 2481 2482 if (True.getNode() && False.getNode()) { 2483 EVT VT = Cond.getValueType(); 2484 SDValue ARMcc = Cond.getOperand(2); 2485 SDValue CCR = Cond.getOperand(3); 2486 SDValue Cmp = Cond.getOperand(4); 2487 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2488 } 2489 } 2490 } 2491 2492 return DAG.getSelectCC(dl, Cond, 2493 DAG.getConstant(0, Cond.getValueType()), 2494 SelectTrue, SelectFalse, ISD::SETNE); 2495} 2496 2497SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2498 EVT VT = Op.getValueType(); 2499 SDValue LHS = Op.getOperand(0); 2500 SDValue RHS = Op.getOperand(1); 2501 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2502 SDValue TrueVal = Op.getOperand(2); 2503 SDValue FalseVal = Op.getOperand(3); 2504 DebugLoc dl = Op.getDebugLoc(); 2505 2506 if (LHS.getValueType() == MVT::i32) { 2507 SDValue ARMcc; 2508 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2509 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2510 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2511 } 2512 2513 ARMCC::CondCodes CondCode, CondCode2; 2514 FPCCToARMCC(CC, CondCode, CondCode2); 2515 2516 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2517 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2518 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2519 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2520 ARMcc, CCR, Cmp); 2521 if (CondCode2 != ARMCC::AL) { 2522 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2523 // FIXME: Needs another CMP because flag can have but one use. 2524 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2525 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2526 Result, TrueVal, ARMcc2, CCR, Cmp2); 2527 } 2528 return Result; 2529} 2530 2531/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2532/// to morph to an integer compare sequence. 
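/// Comparing the raw bit patterns is not exactly IEEE equality (+0.0 and -0.0
/// differ bitwise, and two NaNs may compare bitwise equal), so the caller only
/// does this under unsafe-fp-math, for EQ/NE-style comparisons, and when the
/// operands are known to be non-NaN or one of them is +0.0.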
2533 static bool canChangeToInt(SDValue Op, bool &SeenZero,
2534                            const ARMSubtarget *Subtarget) {
2535   SDNode *N = Op.getNode();
2536   if (!N->hasOneUse())
2537     // Otherwise it requires moving the value from fp to integer registers.
2538     return false;
2539   if (!N->getNumValues())
2540     return false;
2541   EVT VT = Op.getValueType();
2542   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
2543     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
2544     // vmrs are very slow, e.g. cortex-a8.
2545     return false;
2546
2547   if (isFloatingPointZero(Op)) {
2548     SeenZero = true;
2549     return true;
2550   }
2551   return ISD::isNormalLoad(N);
2552 }
2553
2554 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
2555   if (isFloatingPointZero(Op))
2556     return DAG.getConstant(0, MVT::i32);
2557
2558   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
2559     return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2560                        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
2561                        Ld->isVolatile(), Ld->isNonTemporal(),
2562                        Ld->getAlignment());
2563
2564   llvm_unreachable("Unknown VFP cmp argument!");
2565 }
2566
2567 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
2568                            SDValue &RetVal1, SDValue &RetVal2) {
2569   if (isFloatingPointZero(Op)) {
2570     RetVal1 = DAG.getConstant(0, MVT::i32);
2571     RetVal2 = DAG.getConstant(0, MVT::i32);
2572     return;
2573   }
2574
2575   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
2576     SDValue Ptr = Ld->getBasePtr();
2577     RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2578                           Ld->getChain(), Ptr,
2579                           Ld->getPointerInfo(),
2580                           Ld->isVolatile(), Ld->isNonTemporal(),
2581                           Ld->getAlignment());
2582
2583     EVT PtrType = Ptr.getValueType();
2584     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
2585     SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
2586                                  PtrType, Ptr, DAG.getConstant(4, PtrType));
2587     RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
2588                           Ld->getChain(), NewPtr,
2589                           Ld->getPointerInfo().getWithOffset(4),
2590                           Ld->isVolatile(), Ld->isNonTemporal(),
2591                           NewAlign);
2592     return;
2593   }
2594
2595   llvm_unreachable("Unknown VFP cmp argument!");
2596 }
2597
2598 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
2599 /// f32 and even f64 comparisons to integer ones.
2600 SDValue
2601 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
2602   SDValue Chain = Op.getOperand(0);
2603   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2604   SDValue LHS = Op.getOperand(2);
2605   SDValue RHS = Op.getOperand(3);
2606   SDValue Dest = Op.getOperand(4);
2607   DebugLoc dl = Op.getDebugLoc();
2608
2609   bool SeenZero = false;
2610   if (canChangeToInt(LHS, SeenZero, Subtarget) &&
2611       canChangeToInt(RHS, SeenZero, Subtarget) &&
2612       // If one of the operands is zero, it's safe to ignore the NaN case since
2613       // we only care about equality comparisons.
2614       (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
2615     // If unsafe fp math optimization is enabled and there are no other uses of
2616     // the CMP operands, and the condition code is EQ or NE, we can optimize it
2617     // to an integer comparison.
2618 if (CC == ISD::SETOEQ) 2619 CC = ISD::SETEQ; 2620 else if (CC == ISD::SETUNE) 2621 CC = ISD::SETNE; 2622 2623 SDValue ARMcc; 2624 if (LHS.getValueType() == MVT::f32) { 2625 LHS = bitcastf32Toi32(LHS, DAG); 2626 RHS = bitcastf32Toi32(RHS, DAG); 2627 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2628 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2629 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2630 Chain, Dest, ARMcc, CCR, Cmp); 2631 } 2632 2633 SDValue LHS1, LHS2; 2634 SDValue RHS1, RHS2; 2635 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2636 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2637 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2638 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2639 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2640 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2641 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2642 } 2643 2644 return SDValue(); 2645} 2646 2647SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2648 SDValue Chain = Op.getOperand(0); 2649 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2650 SDValue LHS = Op.getOperand(2); 2651 SDValue RHS = Op.getOperand(3); 2652 SDValue Dest = Op.getOperand(4); 2653 DebugLoc dl = Op.getDebugLoc(); 2654 2655 if (LHS.getValueType() == MVT::i32) { 2656 SDValue ARMcc; 2657 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2658 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2659 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2660 Chain, Dest, ARMcc, CCR, Cmp); 2661 } 2662 2663 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2664 2665 if (UnsafeFPMath && 2666 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2667 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2668 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2669 if (Result.getNode()) 2670 return Result; 2671 } 2672 2673 ARMCC::CondCodes CondCode, CondCode2; 2674 FPCCToARMCC(CC, CondCode, CondCode2); 2675 2676 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2677 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2678 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2679 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2680 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2681 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2682 if (CondCode2 != ARMCC::AL) { 2683 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2684 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2685 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2686 } 2687 return Res; 2688} 2689 2690SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2691 SDValue Chain = Op.getOperand(0); 2692 SDValue Table = Op.getOperand(1); 2693 SDValue Index = Op.getOperand(2); 2694 DebugLoc dl = Op.getDebugLoc(); 2695 2696 EVT PTy = getPointerTy(); 2697 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2698 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2699 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2700 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2701 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2702 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2703 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2704 if (Subtarget->isThumb2()) { 2705 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2706 // which does another jump to the destination. 
This also makes it easier 2707 // to translate it to TBB / TBH later. 2708 // FIXME: This might not work if the function is extremely large. 2709 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2710 Addr, Op.getOperand(2), JTI, UId); 2711 } 2712 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2713 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2714 MachinePointerInfo::getJumpTable(), 2715 false, false, 0); 2716 Chain = Addr.getValue(1); 2717 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2718 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2719 } else { 2720 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2721 MachinePointerInfo::getJumpTable(), false, false, 0); 2722 Chain = Addr.getValue(1); 2723 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2724 } 2725} 2726 2727static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2728 DebugLoc dl = Op.getDebugLoc(); 2729 unsigned Opc; 2730 2731 switch (Op.getOpcode()) { 2732 default: 2733 assert(0 && "Invalid opcode!"); 2734 case ISD::FP_TO_SINT: 2735 Opc = ARMISD::FTOSI; 2736 break; 2737 case ISD::FP_TO_UINT: 2738 Opc = ARMISD::FTOUI; 2739 break; 2740 } 2741 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2742 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2743} 2744 2745static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2746 EVT VT = Op.getValueType(); 2747 DebugLoc dl = Op.getDebugLoc(); 2748 unsigned Opc; 2749 2750 switch (Op.getOpcode()) { 2751 default: 2752 assert(0 && "Invalid opcode!"); 2753 case ISD::SINT_TO_FP: 2754 Opc = ARMISD::SITOF; 2755 break; 2756 case ISD::UINT_TO_FP: 2757 Opc = ARMISD::UITOF; 2758 break; 2759 } 2760 2761 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 2762 return DAG.getNode(Opc, dl, VT, Op); 2763} 2764 2765SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2766 // Implement fcopysign with a fabs and a conditional fneg. 2767 SDValue Tmp0 = Op.getOperand(0); 2768 SDValue Tmp1 = Op.getOperand(1); 2769 DebugLoc dl = Op.getDebugLoc(); 2770 EVT VT = Op.getValueType(); 2771 EVT SrcVT = Tmp1.getValueType(); 2772 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2773 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2774 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2775 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2776 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2777 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2778} 2779 2780SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2781 MachineFunction &MF = DAG.getMachineFunction(); 2782 MachineFrameInfo *MFI = MF.getFrameInfo(); 2783 MFI->setReturnAddressIsTaken(true); 2784 2785 EVT VT = Op.getValueType(); 2786 DebugLoc dl = Op.getDebugLoc(); 2787 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2788 if (Depth) { 2789 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2790 SDValue Offset = DAG.getConstant(4, MVT::i32); 2791 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2792 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2793 MachinePointerInfo(), false, false, 0); 2794 } 2795 2796 // Return LR, which contains the return address. Mark it an implicit live-in. 
2797 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2798 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2799} 2800 2801SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2802 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2803 MFI->setFrameAddressIsTaken(true); 2804 2805 EVT VT = Op.getValueType(); 2806 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2807 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2808 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2809 ? ARM::R7 : ARM::R11; 2810 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2811 while (Depth--) 2812 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2813 MachinePointerInfo(), 2814 false, false, 0); 2815 return FrameAddr; 2816} 2817 2818/// ExpandBITCAST - If the target supports VFP, this function is called to 2819/// expand a bit convert where either the source or destination type is i64 to 2820/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2821/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2822/// vectors), since the legalizer won't know what to do with that. 2823static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 2824 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2825 DebugLoc dl = N->getDebugLoc(); 2826 SDValue Op = N->getOperand(0); 2827 2828 // This function is only supposed to be called for i64 types, either as the 2829 // source or destination of the bit convert. 2830 EVT SrcVT = Op.getValueType(); 2831 EVT DstVT = N->getValueType(0); 2832 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2833 "ExpandBITCAST called for non-i64 type"); 2834 2835 // Turn i64->f64 into VMOVDRR. 2836 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2837 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2838 DAG.getConstant(0, MVT::i32)); 2839 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2840 DAG.getConstant(1, MVT::i32)); 2841 return DAG.getNode(ISD::BITCAST, dl, DstVT, 2842 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2843 } 2844 2845 // Turn f64->i64 into VMOVRRD. 2846 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2847 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2848 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2849 // Merge the pieces into a single i64 value. 2850 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2851 } 2852 2853 return SDValue(); 2854} 2855 2856/// getZeroVector - Returns a vector of specified type with all zero elements. 2857/// Zero vectors are used to represent vector negation and in those cases 2858/// will be implemented with the NEON VNEG instruction. However, VNEG does 2859/// not support i64 elements, so sometimes the zero vectors will need to be 2860/// explicitly constructed. Regardless, use a canonical VMOV to create the 2861/// zero vector. 2862static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2863 assert(VT.isVector() && "Expected a vector type"); 2864 // The canonical modified immediate encoding of a zero vector is....0! 2865 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2866 EVT VmovVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 2867 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2868 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 2869} 2870 2871/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 2872/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2873SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2874 SelectionDAG &DAG) const { 2875 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2876 EVT VT = Op.getValueType(); 2877 unsigned VTBits = VT.getSizeInBits(); 2878 DebugLoc dl = Op.getDebugLoc(); 2879 SDValue ShOpLo = Op.getOperand(0); 2880 SDValue ShOpHi = Op.getOperand(1); 2881 SDValue ShAmt = Op.getOperand(2); 2882 SDValue ARMcc; 2883 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2884 2885 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2886 2887 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2888 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2889 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2890 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2891 DAG.getConstant(VTBits, MVT::i32)); 2892 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2893 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2894 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2895 2896 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2897 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2898 ARMcc, DAG, dl); 2899 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2900 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2901 CCR, Cmp); 2902 2903 SDValue Ops[2] = { Lo, Hi }; 2904 return DAG.getMergeValues(Ops, 2, dl); 2905} 2906 2907/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2908/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2909SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2910 SelectionDAG &DAG) const { 2911 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2912 EVT VT = Op.getValueType(); 2913 unsigned VTBits = VT.getSizeInBits(); 2914 DebugLoc dl = Op.getDebugLoc(); 2915 SDValue ShOpLo = Op.getOperand(0); 2916 SDValue ShOpHi = Op.getOperand(1); 2917 SDValue ShAmt = Op.getOperand(2); 2918 SDValue ARMcc; 2919 2920 assert(Op.getOpcode() == ISD::SHL_PARTS); 2921 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2922 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2923 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2924 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2925 DAG.getConstant(VTBits, MVT::i32)); 2926 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2927 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2928 2929 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2930 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2931 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2932 ARMcc, DAG, dl); 2933 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2934 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2935 CCR, Cmp); 2936 2937 SDValue Ops[2] = { Lo, Hi }; 2938 return DAG.getMergeValues(Ops, 2, dl); 2939} 2940 2941SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2942 SelectionDAG &DAG) const { 2943 // The rounding mode is in bits 23:22 of the FPSCR. 
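// (FPSCR encodes the mode as 0 = round to nearest, 1 = toward +infinity,
//  2 = toward -infinity, 3 = toward zero.)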
2944 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 2945 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 2946 // so that the shift + and get folded into a bitfield extract. 2947 DebugLoc dl = Op.getDebugLoc(); 2948 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 2949 DAG.getConstant(Intrinsic::arm_get_fpscr, 2950 MVT::i32)); 2951 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 2952 DAG.getConstant(1U << 22, MVT::i32)); 2953 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 2954 DAG.getConstant(22, MVT::i32)); 2955 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 2956 DAG.getConstant(3, MVT::i32)); 2957} 2958 2959static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 2960 const ARMSubtarget *ST) { 2961 EVT VT = N->getValueType(0); 2962 DebugLoc dl = N->getDebugLoc(); 2963 2964 if (!ST->hasV6T2Ops()) 2965 return SDValue(); 2966 2967 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 2968 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 2969} 2970 2971static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 2972 const ARMSubtarget *ST) { 2973 EVT VT = N->getValueType(0); 2974 DebugLoc dl = N->getDebugLoc(); 2975 2976 if (!VT.isVector()) 2977 return SDValue(); 2978 2979 // Lower vector shifts on NEON to use VSHL. 2980 assert(ST->hasNEON() && "unexpected vector shift"); 2981 2982 // Left shifts translate directly to the vshiftu intrinsic. 2983 if (N->getOpcode() == ISD::SHL) 2984 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 2985 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 2986 N->getOperand(0), N->getOperand(1)); 2987 2988 assert((N->getOpcode() == ISD::SRA || 2989 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 2990 2991 // NEON uses the same intrinsics for both left and right shifts. For 2992 // right shifts, the shift amounts are negative, so negate the vector of 2993 // shift amounts. 2994 EVT ShiftVT = N->getOperand(1).getValueType(); 2995 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 2996 getZeroVector(ShiftVT, DAG, dl), 2997 N->getOperand(1)); 2998 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 2999 Intrinsic::arm_neon_vshifts : 3000 Intrinsic::arm_neon_vshiftu); 3001 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3002 DAG.getConstant(vshiftInt, MVT::i32), 3003 N->getOperand(0), NegatedCount); 3004} 3005 3006static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3007 const ARMSubtarget *ST) { 3008 EVT VT = N->getValueType(0); 3009 DebugLoc dl = N->getDebugLoc(); 3010 3011 // We can get here for a node like i32 = ISD::SHL i32, i64 3012 if (VT != MVT::i64) 3013 return SDValue(); 3014 3015 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3016 "Unknown shift to lower!"); 3017 3018 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3019 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3020 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3021 return SDValue(); 3022 3023 // If we are in thumb mode, we don't have RRX. 3024 if (ST->isThumb1Only()) return SDValue(); 3025 3026 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 
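// Roughly, in ARM assembly:
//   lsrs/asrs hi, hi, #1   @ shift the high word by one, carry = the bit shifted out
//   rrx       lo, lo       @ rotate that carry into bit 31 of the low word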
3027 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3028 DAG.getConstant(0, MVT::i32)); 3029 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3030 DAG.getConstant(1, MVT::i32)); 3031 3032 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3033 // captures the result into a carry flag. 3034 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3035 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3036 3037 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3038 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3039 3040 // Merge the pieces into a single i64 value. 3041 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3042} 3043 3044static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3045 SDValue TmpOp0, TmpOp1; 3046 bool Invert = false; 3047 bool Swap = false; 3048 unsigned Opc = 0; 3049 3050 SDValue Op0 = Op.getOperand(0); 3051 SDValue Op1 = Op.getOperand(1); 3052 SDValue CC = Op.getOperand(2); 3053 EVT VT = Op.getValueType(); 3054 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3055 DebugLoc dl = Op.getDebugLoc(); 3056 3057 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3058 switch (SetCCOpcode) { 3059 default: llvm_unreachable("Illegal FP comparison"); break; 3060 case ISD::SETUNE: 3061 case ISD::SETNE: Invert = true; // Fallthrough 3062 case ISD::SETOEQ: 3063 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3064 case ISD::SETOLT: 3065 case ISD::SETLT: Swap = true; // Fallthrough 3066 case ISD::SETOGT: 3067 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3068 case ISD::SETOLE: 3069 case ISD::SETLE: Swap = true; // Fallthrough 3070 case ISD::SETOGE: 3071 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3072 case ISD::SETUGE: Swap = true; // Fallthrough 3073 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3074 case ISD::SETUGT: Swap = true; // Fallthrough 3075 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3076 case ISD::SETUEQ: Invert = true; // Fallthrough 3077 case ISD::SETONE: 3078 // Expand this to (OLT | OGT). 3079 TmpOp0 = Op0; 3080 TmpOp1 = Op1; 3081 Opc = ISD::OR; 3082 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3083 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3084 break; 3085 case ISD::SETUO: Invert = true; // Fallthrough 3086 case ISD::SETO: 3087 // Expand this to (OLT | OGE). 3088 TmpOp0 = Op0; 3089 TmpOp1 = Op1; 3090 Opc = ISD::OR; 3091 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3092 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3093 break; 3094 } 3095 } else { 3096 // Integer comparisons. 3097 switch (SetCCOpcode) { 3098 default: llvm_unreachable("Illegal integer comparison"); break; 3099 case ISD::SETNE: Invert = true; 3100 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3101 case ISD::SETLT: Swap = true; 3102 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3103 case ISD::SETLE: Swap = true; 3104 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3105 case ISD::SETULT: Swap = true; 3106 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3107 case ISD::SETULE: Swap = true; 3108 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3109 } 3110 3111 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 
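// For the SETNE form the Invert flag set above gets flipped back below, so the
// whole pattern becomes a single VTST; the SETEQ form becomes a VTST whose
// result is then inverted.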
3112 if (Opc == ARMISD::VCEQ) { 3113 3114 SDValue AndOp; 3115 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3116 AndOp = Op0; 3117 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3118 AndOp = Op1; 3119 3120 // Ignore bitconvert. 3121 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3122 AndOp = AndOp.getOperand(0); 3123 3124 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3125 Opc = ARMISD::VTST; 3126 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3127 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3128 Invert = !Invert; 3129 } 3130 } 3131 } 3132 3133 if (Swap) 3134 std::swap(Op0, Op1); 3135 3136 // If one of the operands is a constant vector zero, attempt to fold the 3137 // comparison to a specialized compare-against-zero form. 3138 SDValue SingleOp; 3139 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3140 SingleOp = Op0; 3141 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3142 if (Opc == ARMISD::VCGE) 3143 Opc = ARMISD::VCLEZ; 3144 else if (Opc == ARMISD::VCGT) 3145 Opc = ARMISD::VCLTZ; 3146 SingleOp = Op1; 3147 } 3148 3149 SDValue Result; 3150 if (SingleOp.getNode()) { 3151 switch (Opc) { 3152 case ARMISD::VCEQ: 3153 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3154 case ARMISD::VCGE: 3155 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3156 case ARMISD::VCLEZ: 3157 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3158 case ARMISD::VCGT: 3159 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3160 case ARMISD::VCLTZ: 3161 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3162 default: 3163 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3164 } 3165 } else { 3166 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3167 } 3168 3169 if (Invert) 3170 Result = DAG.getNOT(dl, Result, VT); 3171 3172 return Result; 3173} 3174 3175/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3176/// valid vector constant for a NEON instruction with a "modified immediate" 3177/// operand (e.g., VMOV). If so, return the encoded value. 3178static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3179 unsigned SplatBitSize, SelectionDAG &DAG, 3180 EVT &VT, bool is128Bits, NEONModImmType type) { 3181 unsigned OpCmode, Imm; 3182 3183 // SplatBitSize is set to the smallest size that splats the vector, so a 3184 // zero vector will always have SplatBitSize == 8. However, NEON modified 3185 // immediate instructions others than VMOV do not support the 8-bit encoding 3186 // of a zero vector, and the default encoding of zero is supposed to be the 3187 // 32-bit version. 3188 if (SplatBits == 0) 3189 SplatBitSize = 32; 3190 3191 switch (SplatBitSize) { 3192 case 8: 3193 if (type != VMOVModImm) 3194 return SDValue(); 3195 // Any 1-byte value is OK. Op=0, Cmode=1110. 3196 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3197 OpCmode = 0xe; 3198 Imm = SplatBits; 3199 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3200 break; 3201 3202 case 16: 3203 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3204 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3205 if ((SplatBits & ~0xff) == 0) { 3206 // Value = 0x00nn: Op=x, Cmode=100x. 3207 OpCmode = 0x8; 3208 Imm = SplatBits; 3209 break; 3210 } 3211 if ((SplatBits & ~0xff00) == 0) { 3212 // Value = 0xnn00: Op=x, Cmode=101x. 
3213 OpCmode = 0xa; 3214 Imm = SplatBits >> 8; 3215 break; 3216 } 3217 return SDValue(); 3218 3219 case 32: 3220 // NEON's 32-bit VMOV supports splat values where: 3221 // * only one byte is nonzero, or 3222 // * the least significant byte is 0xff and the second byte is nonzero, or 3223 // * the least significant 2 bytes are 0xff and the third is nonzero. 3224 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3225 if ((SplatBits & ~0xff) == 0) { 3226 // Value = 0x000000nn: Op=x, Cmode=000x. 3227 OpCmode = 0; 3228 Imm = SplatBits; 3229 break; 3230 } 3231 if ((SplatBits & ~0xff00) == 0) { 3232 // Value = 0x0000nn00: Op=x, Cmode=001x. 3233 OpCmode = 0x2; 3234 Imm = SplatBits >> 8; 3235 break; 3236 } 3237 if ((SplatBits & ~0xff0000) == 0) { 3238 // Value = 0x00nn0000: Op=x, Cmode=010x. 3239 OpCmode = 0x4; 3240 Imm = SplatBits >> 16; 3241 break; 3242 } 3243 if ((SplatBits & ~0xff000000) == 0) { 3244 // Value = 0xnn000000: Op=x, Cmode=011x. 3245 OpCmode = 0x6; 3246 Imm = SplatBits >> 24; 3247 break; 3248 } 3249 3250 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3251 if (type == OtherModImm) return SDValue(); 3252 3253 if ((SplatBits & ~0xffff) == 0 && 3254 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3255 // Value = 0x0000nnff: Op=x, Cmode=1100. 3256 OpCmode = 0xc; 3257 Imm = SplatBits >> 8; 3258 SplatBits |= 0xff; 3259 break; 3260 } 3261 3262 if ((SplatBits & ~0xffffff) == 0 && 3263 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3264 // Value = 0x00nnffff: Op=x, Cmode=1101. 3265 OpCmode = 0xd; 3266 Imm = SplatBits >> 16; 3267 SplatBits |= 0xffff; 3268 break; 3269 } 3270 3271 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3272 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3273 // VMOV.I32. A (very) minor optimization would be to replicate the value 3274 // and fall through here to test for a valid 64-bit splat. But, then the 3275 // caller would also need to check and handle the change in size. 3276 return SDValue(); 3277 3278 case 64: { 3279 if (type != VMOVModImm) 3280 return SDValue(); 3281 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3282 uint64_t BitMask = 0xff; 3283 uint64_t Val = 0; 3284 unsigned ImmMask = 1; 3285 Imm = 0; 3286 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3287 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3288 Val |= BitMask; 3289 Imm |= ImmMask; 3290 } else if ((SplatBits & BitMask) != 0) { 3291 return SDValue(); 3292 } 3293 BitMask <<= 8; 3294 ImmMask <<= 1; 3295 } 3296 // Op=1, Cmode=1110. 3297 OpCmode = 0x1e; 3298 SplatBits = Val; 3299 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3300 break; 3301 } 3302 3303 default: 3304 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3305 return SDValue(); 3306 } 3307 3308 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3309 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3310} 3311 3312static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3313 bool &ReverseVEXT, unsigned &Imm) { 3314 unsigned NumElts = VT.getVectorNumElements(); 3315 ReverseVEXT = false; 3316 3317 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3318 if (M[0] < 0) 3319 return false; 3320 3321 Imm = M[0]; 3322 3323 // If this is a VEXT shuffle, the immediate value is the index of the first 3324 // element. The other shuffle indices must be the successive elements after 3325 // the first one. 3326 unsigned ExpectedElt = Imm; 3327 for (unsigned i = 1; i < NumElts; ++i) { 3328 // Increment the expected index. 
If it wraps around, it may still be 3329 // a VEXT but the source vectors must be swapped. 3330 ExpectedElt += 1; 3331 if (ExpectedElt == NumElts * 2) { 3332 ExpectedElt = 0; 3333 ReverseVEXT = true; 3334 } 3335 3336 if (M[i] < 0) continue; // ignore UNDEF indices 3337 if (ExpectedElt != static_cast<unsigned>(M[i])) 3338 return false; 3339 } 3340 3341 // Adjust the index value if the source operands will be swapped. 3342 if (ReverseVEXT) 3343 Imm -= NumElts; 3344 3345 return true; 3346} 3347 3348/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3349/// instruction with the specified blocksize. (The order of the elements 3350/// within each block of the vector is reversed.) 3351static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3352 unsigned BlockSize) { 3353 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3354 "Only possible block sizes for VREV are: 16, 32, 64"); 3355 3356 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3357 if (EltSz == 64) 3358 return false; 3359 3360 unsigned NumElts = VT.getVectorNumElements(); 3361 unsigned BlockElts = M[0] + 1; 3362 // If the first shuffle index is UNDEF, be optimistic. 3363 if (M[0] < 0) 3364 BlockElts = BlockSize / EltSz; 3365 3366 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3367 return false; 3368 3369 for (unsigned i = 0; i < NumElts; ++i) { 3370 if (M[i] < 0) continue; // ignore UNDEF indices 3371 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3372 return false; 3373 } 3374 3375 return true; 3376} 3377 3378static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3379 unsigned &WhichResult) { 3380 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3381 if (EltSz == 64) 3382 return false; 3383 3384 unsigned NumElts = VT.getVectorNumElements(); 3385 WhichResult = (M[0] == 0 ? 0 : 1); 3386 for (unsigned i = 0; i < NumElts; i += 2) { 3387 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3388 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3389 return false; 3390 } 3391 return true; 3392} 3393 3394/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3395/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3396/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3397static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3398 unsigned &WhichResult) { 3399 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3400 if (EltSz == 64) 3401 return false; 3402 3403 unsigned NumElts = VT.getVectorNumElements(); 3404 WhichResult = (M[0] == 0 ? 0 : 1); 3405 for (unsigned i = 0; i < NumElts; i += 2) { 3406 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3407 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3408 return false; 3409 } 3410 return true; 3411} 3412 3413static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3414 unsigned &WhichResult) { 3415 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3416 if (EltSz == 64) 3417 return false; 3418 3419 unsigned NumElts = VT.getVectorNumElements(); 3420 WhichResult = (M[0] == 0 ? 0 : 1); 3421 for (unsigned i = 0; i != NumElts; ++i) { 3422 if (M[i] < 0) continue; // ignore UNDEF indices 3423 if ((unsigned) M[i] != 2 * i + WhichResult) 3424 return false; 3425 } 3426 3427 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
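// (With only two 32-bit elements per vector, "unzip" is the same permutation
//  as a transpose, so leave that case to the VTRN patterns.)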
3428 if (VT.is64BitVector() && EltSz == 32) 3429 return false; 3430 3431 return true; 3432} 3433 3434/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3435/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3436/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3437static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3438 unsigned &WhichResult) { 3439 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3440 if (EltSz == 64) 3441 return false; 3442 3443 unsigned Half = VT.getVectorNumElements() / 2; 3444 WhichResult = (M[0] == 0 ? 0 : 1); 3445 for (unsigned j = 0; j != 2; ++j) { 3446 unsigned Idx = WhichResult; 3447 for (unsigned i = 0; i != Half; ++i) { 3448 int MIdx = M[i + j * Half]; 3449 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3450 return false; 3451 Idx += 2; 3452 } 3453 } 3454 3455 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3456 if (VT.is64BitVector() && EltSz == 32) 3457 return false; 3458 3459 return true; 3460} 3461 3462static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3463 unsigned &WhichResult) { 3464 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3465 if (EltSz == 64) 3466 return false; 3467 3468 unsigned NumElts = VT.getVectorNumElements(); 3469 WhichResult = (M[0] == 0 ? 0 : 1); 3470 unsigned Idx = WhichResult * NumElts / 2; 3471 for (unsigned i = 0; i != NumElts; i += 2) { 3472 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3473 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3474 return false; 3475 Idx += 1; 3476 } 3477 3478 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3479 if (VT.is64BitVector() && EltSz == 32) 3480 return false; 3481 3482 return true; 3483} 3484 3485/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3486/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3487/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3488static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3489 unsigned &WhichResult) { 3490 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3491 if (EltSz == 64) 3492 return false; 3493 3494 unsigned NumElts = VT.getVectorNumElements(); 3495 WhichResult = (M[0] == 0 ? 0 : 1); 3496 unsigned Idx = WhichResult * NumElts / 2; 3497 for (unsigned i = 0; i != NumElts; i += 2) { 3498 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3499 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3500 return false; 3501 Idx += 1; 3502 } 3503 3504 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3505 if (VT.is64BitVector() && EltSz == 32) 3506 return false; 3507 3508 return true; 3509} 3510 3511// If N is an integer constant that can be moved into a register in one 3512// instruction, return an SDValue of such a constant (will become a MOV 3513// instruction). Otherwise return null. 3514static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3515 const ARMSubtarget *ST, DebugLoc dl) { 3516 uint64_t Val; 3517 if (!isa<ConstantSDNode>(N)) 3518 return SDValue(); 3519 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3520 3521 if (ST->isThumb1Only()) { 3522 if (Val <= 255 || ~Val <= 255) 3523 return DAG.getConstant(Val, MVT::i32); 3524 } else { 3525 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3526 return DAG.getConstant(Val, MVT::i32); 3527 } 3528 return SDValue(); 3529} 3530 3531// If this is a case we can't handle, return null and let the default 3532// expansion code take care of it. 
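// The code below tries, in order: an immediate VMOV or VMVN for constant
// splats, SCALAR_TO_VECTOR when only element 0 is defined, VDUP for splats of
// a single non-constant (or single-instruction constant) value, and an
// ARMISD::BUILD_VECTOR for 32- and 64-bit elements; anything else is left to
// the default expansion.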
3533static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3534 const ARMSubtarget *ST) { 3535 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3536 DebugLoc dl = Op.getDebugLoc(); 3537 EVT VT = Op.getValueType(); 3538 3539 APInt SplatBits, SplatUndef; 3540 unsigned SplatBitSize; 3541 bool HasAnyUndefs; 3542 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3543 if (SplatBitSize <= 64) { 3544 // Check if an immediate VMOV works. 3545 EVT VmovVT; 3546 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3547 SplatUndef.getZExtValue(), SplatBitSize, 3548 DAG, VmovVT, VT.is128BitVector(), 3549 VMOVModImm); 3550 if (Val.getNode()) { 3551 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3552 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3553 } 3554 3555 // Try an immediate VMVN. 3556 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3557 ((1LL << SplatBitSize) - 1)); 3558 Val = isNEONModifiedImm(NegatedImm, 3559 SplatUndef.getZExtValue(), SplatBitSize, 3560 DAG, VmovVT, VT.is128BitVector(), 3561 VMVNModImm); 3562 if (Val.getNode()) { 3563 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3564 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3565 } 3566 } 3567 } 3568 3569 // Scan through the operands to see if only one value is used. 3570 unsigned NumElts = VT.getVectorNumElements(); 3571 bool isOnlyLowElement = true; 3572 bool usesOnlyOneValue = true; 3573 bool isConstant = true; 3574 SDValue Value; 3575 for (unsigned i = 0; i < NumElts; ++i) { 3576 SDValue V = Op.getOperand(i); 3577 if (V.getOpcode() == ISD::UNDEF) 3578 continue; 3579 if (i > 0) 3580 isOnlyLowElement = false; 3581 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3582 isConstant = false; 3583 3584 if (!Value.getNode()) 3585 Value = V; 3586 else if (V != Value) 3587 usesOnlyOneValue = false; 3588 } 3589 3590 if (!Value.getNode()) 3591 return DAG.getUNDEF(VT); 3592 3593 if (isOnlyLowElement) 3594 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3595 3596 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3597 3598 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3599 // i32 and try again. 3600 if (usesOnlyOneValue && EltSize <= 32) { 3601 if (!isConstant) 3602 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3603 if (VT.getVectorElementType().isFloatingPoint()) { 3604 SmallVector<SDValue, 8> Ops; 3605 for (unsigned i = 0; i < NumElts; ++i) 3606 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3607 Op.getOperand(i))); 3608 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3609 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3610 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3611 if (Val.getNode()) 3612 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3613 } 3614 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3615 if (Val.getNode()) 3616 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3617 } 3618 3619 // If all elements are constants and the case above didn't get hit, fall back 3620 // to the default expansion, which will generate a load from the constant 3621 // pool. 3622 if (isConstant) 3623 return SDValue(); 3624 3625 // Vectors with 32- or 64-bit elements can be built by directly assigning 3626 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3627 // will be legalized. 
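// For example, a v2i64 build becomes an ARMISD::BUILD_VECTOR of two f64 values
// that can be placed directly into the D subregisters of a Q register.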
3628 if (EltSize >= 32) { 3629 // Do the expansion with floating-point types, since that is what the VFP 3630 // registers are defined to use, and since i64 is not legal. 3631 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3632 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3633 SmallVector<SDValue, 8> Ops; 3634 for (unsigned i = 0; i < NumElts; ++i) 3635 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3636 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3637 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3638 } 3639 3640 return SDValue(); 3641} 3642 3643/// isShuffleMaskLegal - Targets can use this to indicate that they only 3644/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3645/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3646/// are assumed to be legal. 3647bool 3648ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3649 EVT VT) const { 3650 if (VT.getVectorNumElements() == 4 && 3651 (VT.is128BitVector() || VT.is64BitVector())) { 3652 unsigned PFIndexes[4]; 3653 for (unsigned i = 0; i != 4; ++i) { 3654 if (M[i] < 0) 3655 PFIndexes[i] = 8; 3656 else 3657 PFIndexes[i] = M[i]; 3658 } 3659 3660 // Compute the index in the perfect shuffle table. 3661 unsigned PFTableIndex = 3662 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3663 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3664 unsigned Cost = (PFEntry >> 30); 3665 3666 if (Cost <= 4) 3667 return true; 3668 } 3669 3670 bool ReverseVEXT; 3671 unsigned Imm, WhichResult; 3672 3673 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3674 return (EltSize >= 32 || 3675 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3676 isVREVMask(M, VT, 64) || 3677 isVREVMask(M, VT, 32) || 3678 isVREVMask(M, VT, 16) || 3679 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3680 isVTRNMask(M, VT, WhichResult) || 3681 isVUZPMask(M, VT, WhichResult) || 3682 isVZIPMask(M, VT, WhichResult) || 3683 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3684 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3685 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3686} 3687 3688/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3689/// the specified operations to build the shuffle. 
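// Each table entry packs a cost into bits 31:30, an opcode (one of the OP_*
// values below) into bits 29:26, and two 13-bit operand IDs. The IDs encode
// 4-element masks in base 9 (8 = undef), e.g. <0,1,2,3> is
// ((0*9+1)*9+2)*9+3 = 102, which is the OP_COPY check for LHS below.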
3690static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3691 SDValue RHS, SelectionDAG &DAG, 3692 DebugLoc dl) { 3693 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3694 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3695 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3696 3697 enum { 3698 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3699 OP_VREV, 3700 OP_VDUP0, 3701 OP_VDUP1, 3702 OP_VDUP2, 3703 OP_VDUP3, 3704 OP_VEXT1, 3705 OP_VEXT2, 3706 OP_VEXT3, 3707 OP_VUZPL, // VUZP, left result 3708 OP_VUZPR, // VUZP, right result 3709 OP_VZIPL, // VZIP, left result 3710 OP_VZIPR, // VZIP, right result 3711 OP_VTRNL, // VTRN, left result 3712 OP_VTRNR // VTRN, right result 3713 }; 3714 3715 if (OpNum == OP_COPY) { 3716 if (LHSID == (1*9+2)*9+3) return LHS; 3717 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3718 return RHS; 3719 } 3720 3721 SDValue OpLHS, OpRHS; 3722 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3723 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3724 EVT VT = OpLHS.getValueType(); 3725 3726 switch (OpNum) { 3727 default: llvm_unreachable("Unknown shuffle opcode!"); 3728 case OP_VREV: 3729 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3730 case OP_VDUP0: 3731 case OP_VDUP1: 3732 case OP_VDUP2: 3733 case OP_VDUP3: 3734 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3735 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3736 case OP_VEXT1: 3737 case OP_VEXT2: 3738 case OP_VEXT3: 3739 return DAG.getNode(ARMISD::VEXT, dl, VT, 3740 OpLHS, OpRHS, 3741 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3742 case OP_VUZPL: 3743 case OP_VUZPR: 3744 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3745 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3746 case OP_VZIPL: 3747 case OP_VZIPR: 3748 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3749 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3750 case OP_VTRNL: 3751 case OP_VTRNR: 3752 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3753 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3754 } 3755} 3756 3757static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3758 SDValue V1 = Op.getOperand(0); 3759 SDValue V2 = Op.getOperand(1); 3760 DebugLoc dl = Op.getDebugLoc(); 3761 EVT VT = Op.getValueType(); 3762 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3763 SmallVector<int, 8> ShuffleMask; 3764 3765 // Convert shuffles that are directly supported on NEON to target-specific 3766 // DAG nodes, instead of keeping them as shuffles and matching them again 3767 // during code selection. This is more efficient and avoids the possibility 3768 // of inconsistencies between legalization and selection. 3769 // FIXME: floating-point vectors should be canonicalized to integer vectors 3770 // of the same time so that they get CSEd properly. 3771 SVN->getMask(ShuffleMask); 3772 3773 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3774 if (EltSize <= 32) { 3775 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 3776 int Lane = SVN->getSplatIndex(); 3777 // If this is undef splat, generate it via "just" vdup, if possible. 
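// (A splat of lane 0 of a SCALAR_TO_VECTOR is just a VDUP of the original
//  scalar; any other splat becomes a VDUPLANE of the requested lane.)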
3778 if (Lane == -1) Lane = 0; 3779 3780 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 3781 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 3782 } 3783 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 3784 DAG.getConstant(Lane, MVT::i32)); 3785 } 3786 3787 bool ReverseVEXT; 3788 unsigned Imm; 3789 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 3790 if (ReverseVEXT) 3791 std::swap(V1, V2); 3792 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 3793 DAG.getConstant(Imm, MVT::i32)); 3794 } 3795 3796 if (isVREVMask(ShuffleMask, VT, 64)) 3797 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 3798 if (isVREVMask(ShuffleMask, VT, 32)) 3799 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 3800 if (isVREVMask(ShuffleMask, VT, 16)) 3801 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 3802 3803 // Check for Neon shuffles that modify both input vectors in place. 3804 // If both results are used, i.e., if there are two shuffles with the same 3805 // source operands and with masks corresponding to both results of one of 3806 // these operations, DAG memoization will ensure that a single node is 3807 // used for both shuffles. 3808 unsigned WhichResult; 3809 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 3810 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3811 V1, V2).getValue(WhichResult); 3812 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 3813 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3814 V1, V2).getValue(WhichResult); 3815 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 3816 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3817 V1, V2).getValue(WhichResult); 3818 3819 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3820 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3821 V1, V1).getValue(WhichResult); 3822 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3823 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3824 V1, V1).getValue(WhichResult); 3825 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3826 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3827 V1, V1).getValue(WhichResult); 3828 } 3829 3830 // If the shuffle is not directly supported and it has 4 elements, use 3831 // the PerfectShuffle-generated table to synthesize it from other shuffles. 3832 unsigned NumElts = VT.getVectorNumElements(); 3833 if (NumElts == 4) { 3834 unsigned PFIndexes[4]; 3835 for (unsigned i = 0; i != 4; ++i) { 3836 if (ShuffleMask[i] < 0) 3837 PFIndexes[i] = 8; 3838 else 3839 PFIndexes[i] = ShuffleMask[i]; 3840 } 3841 3842 // Compute the index in the perfect shuffle table. 3843 unsigned PFTableIndex = 3844 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3845 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3846 unsigned Cost = (PFEntry >> 30); 3847 3848 if (Cost <= 4) 3849 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3850 } 3851 3852 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 3853 if (EltSize >= 32) { 3854 // Do the expansion with floating-point types, since that is what the VFP 3855 // registers are defined to use, and since i64 is not legal. 
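// Each output element becomes an EXTRACT_VECTOR_ELT from whichever source the
// mask index selects (V1 for indices < NumElts, V2 otherwise); undef mask
// entries simply stay undef.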
3856 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3857 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3858 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 3859 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 3860 SmallVector<SDValue, 8> Ops; 3861 for (unsigned i = 0; i < NumElts; ++i) { 3862 if (ShuffleMask[i] < 0) 3863 Ops.push_back(DAG.getUNDEF(EltVT)); 3864 else 3865 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3866 ShuffleMask[i] < (int)NumElts ? V1 : V2, 3867 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 3868 MVT::i32))); 3869 } 3870 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3871 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3872 } 3873 3874 return SDValue(); 3875} 3876 3877static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 3878 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 3879 SDValue Lane = Op.getOperand(1); 3880 if (!isa<ConstantSDNode>(Lane)) 3881 return SDValue(); 3882 3883 SDValue Vec = Op.getOperand(0); 3884 if (Op.getValueType() == MVT::i32 && 3885 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 3886 DebugLoc dl = Op.getDebugLoc(); 3887 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 3888 } 3889 3890 return Op; 3891} 3892 3893static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 3894 // The only time a CONCAT_VECTORS operation can have legal types is when 3895 // two 64-bit vectors are concatenated to a 128-bit vector. 3896 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 3897 "unexpected CONCAT_VECTORS"); 3898 DebugLoc dl = Op.getDebugLoc(); 3899 SDValue Val = DAG.getUNDEF(MVT::v2f64); 3900 SDValue Op0 = Op.getOperand(0); 3901 SDValue Op1 = Op.getOperand(1); 3902 if (Op0.getOpcode() != ISD::UNDEF) 3903 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3904 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 3905 DAG.getIntPtrConstant(0)); 3906 if (Op1.getOpcode() != ISD::UNDEF) 3907 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3908 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 3909 DAG.getIntPtrConstant(1)); 3910 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 3911} 3912 3913/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 3914/// element has been zero/sign-extended, depending on the isSigned parameter, 3915/// from an integer type half its size. 3916static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 3917 bool isSigned) { 3918 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 3919 EVT VT = N->getValueType(0); 3920 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 3921 SDNode *BVN = N->getOperand(0).getNode(); 3922 if (BVN->getValueType(0) != MVT::v4i32 || 3923 BVN->getOpcode() != ISD::BUILD_VECTOR) 3924 return false; 3925 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 3926 unsigned HiElt = 1 - LoElt; 3927 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 3928 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 3929 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 3930 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 3931 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 3932 return false; 3933 if (isSigned) { 3934 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 3935 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 3936 return true; 3937 } else { 3938 if (Hi0->isNullValue() && Hi1->isNullValue()) 3939 return true; 3940 } 3941 return false; 3942 } 3943 3944 if (N->getOpcode() != ISD::BUILD_VECTOR) 3945 return false; 3946 3947 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 3948 SDNode *Elt = N->getOperand(i).getNode(); 3949 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 3950 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3951 unsigned HalfSize = EltSize / 2; 3952 if (isSigned) { 3953 int64_t SExtVal = C->getSExtValue(); 3954 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 3955 return false; 3956 } else { 3957 if ((C->getZExtValue() >> HalfSize) != 0) 3958 return false; 3959 } 3960 continue; 3961 } 3962 return false; 3963 } 3964 3965 return true; 3966} 3967 3968/// isSignExtended - Check if a node is a vector value that is sign-extended 3969/// or a constant BUILD_VECTOR with sign-extended elements. 3970static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 3971 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 3972 return true; 3973 if (isExtendedBUILD_VECTOR(N, DAG, true)) 3974 return true; 3975 return false; 3976} 3977 3978/// isZeroExtended - Check if a node is a vector value that is zero-extended 3979/// or a constant BUILD_VECTOR with zero-extended elements. 3980static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 3981 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 3982 return true; 3983 if (isExtendedBUILD_VECTOR(N, DAG, false)) 3984 return true; 3985 return false; 3986} 3987 3988/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 3989/// load, or BUILD_VECTOR with extended elements, return the unextended value. 3990static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 3991 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 3992 return N->getOperand(0); 3993 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 3994 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 3995 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 3996 LD->isNonTemporal(), LD->getAlignment()); 3997 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 3998 // have been legalized as a BITCAST from v4i32. 3999 if (N->getOpcode() == ISD::BITCAST) { 4000 SDNode *BVN = N->getOperand(0).getNode(); 4001 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4002 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4003 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4004 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4005 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4006 } 4007 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
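// For example, a v4i32 BUILD_VECTOR of zero-extended constants becomes a
// v4i16 BUILD_VECTOR of the truncated constants, matching the 64-bit operand
// width that VMULL expects.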
4008 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4009 EVT VT = N->getValueType(0); 4010 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4011 unsigned NumElts = VT.getVectorNumElements(); 4012 MVT TruncVT = MVT::getIntegerVT(EltSize); 4013 SmallVector<SDValue, 8> Ops; 4014 for (unsigned i = 0; i != NumElts; ++i) { 4015 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4016 const APInt &CInt = C->getAPIntValue(); 4017 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4018 } 4019 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4020 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4021} 4022 4023static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4024 // Multiplications are only custom-lowered for 128-bit vectors so that 4025 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4026 EVT VT = Op.getValueType(); 4027 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4028 SDNode *N0 = Op.getOperand(0).getNode(); 4029 SDNode *N1 = Op.getOperand(1).getNode(); 4030 unsigned NewOpc = 0; 4031 if (isSignExtended(N0, DAG) && isSignExtended(N1, DAG)) 4032 NewOpc = ARMISD::VMULLs; 4033 else if (isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG)) 4034 NewOpc = ARMISD::VMULLu; 4035 else if (VT == MVT::v2i64) 4036 // Fall through to expand this. It is not legal. 4037 return SDValue(); 4038 else 4039 // Other vector multiplications are legal. 4040 return Op; 4041 4042 // Legalize to a VMULL instruction. 4043 DebugLoc DL = Op.getDebugLoc(); 4044 SDValue Op0 = SkipExtension(N0, DAG); 4045 SDValue Op1 = SkipExtension(N1, DAG); 4046 4047 assert(Op0.getValueType().is64BitVector() && 4048 Op1.getValueType().is64BitVector() && 4049 "unexpected types for extended operands to VMULL"); 4050 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4051} 4052 4053SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4054 switch (Op.getOpcode()) { 4055 default: llvm_unreachable("Don't know how to custom lower this!"); 4056 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4057 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4058 case ISD::GlobalAddress: 4059 return Subtarget->isTargetDarwin() ? 
LowerGlobalAddressDarwin(Op, DAG) : 4060 LowerGlobalAddressELF(Op, DAG); 4061 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4062 case ISD::SELECT: return LowerSELECT(Op, DAG); 4063 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4064 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4065 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4066 case ISD::VASTART: return LowerVASTART(Op, DAG); 4067 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4068 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4069 case ISD::SINT_TO_FP: 4070 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4071 case ISD::FP_TO_SINT: 4072 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4073 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4074 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4075 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4076 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4077 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4078 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4079 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4080 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4081 Subtarget); 4082 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4083 case ISD::SHL: 4084 case ISD::SRL: 4085 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4086 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4087 case ISD::SRL_PARTS: 4088 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4089 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4090 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4091 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4092 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4093 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4094 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4095 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4096 case ISD::MUL: return LowerMUL(Op, DAG); 4097 } 4098 return SDValue(); 4099} 4100 4101/// ReplaceNodeResults - Replace the results of node with an illegal result 4102/// type with new values built out of custom code. 
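// Only i64 BITCASTs and 64-bit SRL/SRA nodes are custom-expanded here; the
// replacement values are built by ExpandBITCAST and Expand64BitShift above.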
4103 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
4104                                             SmallVectorImpl<SDValue>&Results,
4105                                             SelectionDAG &DAG) const {
4106   SDValue Res;
4107   switch (N->getOpcode()) {
4108   default:
4109     llvm_unreachable("Don't know how to custom expand this!");
4110     break;
4111   case ISD::BITCAST:
4112     Res = ExpandBITCAST(N, DAG);
4113     break;
4114   case ISD::SRL:
4115   case ISD::SRA:
4116     Res = Expand64BitShift(N, DAG, Subtarget);
4117     break;
4118   }
4119   if (Res.getNode())
4120     Results.push_back(Res);
4121 }
4122
4123 //===----------------------------------------------------------------------===//
4124 // ARM Scheduler Hooks
4125 //===----------------------------------------------------------------------===//
4126
4127 MachineBasicBlock *
4128 ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
4129                                      MachineBasicBlock *BB,
4130                                      unsigned Size) const {
4131   unsigned dest = MI->getOperand(0).getReg();
4132   unsigned ptr = MI->getOperand(1).getReg();
4133   unsigned oldval = MI->getOperand(2).getReg();
4134   unsigned newval = MI->getOperand(3).getReg();
4135   unsigned scratch = BB->getParent()->getRegInfo()
4136     .createVirtualRegister(ARM::GPRRegisterClass);
4137   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4138   DebugLoc dl = MI->getDebugLoc();
4139   bool isThumb2 = Subtarget->isThumb2();
4140
4141   unsigned ldrOpc, strOpc;
4142   switch (Size) {
4143   default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
4144   case 1:
4145     ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4146     strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4147     break;
4148   case 2:
4149     ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4150     strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4151     break;
4152   case 4:
4153     ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4154     strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4155     break;
4156   }
4157
4158   MachineFunction *MF = BB->getParent();
4159   const BasicBlock *LLVM_BB = BB->getBasicBlock();
4160   MachineFunction::iterator It = BB;
4161   ++It; // insert the new blocks after the current block
4162
4163   MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4164   MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
4165   MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4166   MF->insert(It, loop1MBB);
4167   MF->insert(It, loop2MBB);
4168   MF->insert(It, exitMBB);
4169
4170   // Transfer the remainder of BB and its successor edges to exitMBB.
4171   exitMBB->splice(exitMBB->begin(), BB,
4172                   llvm::next(MachineBasicBlock::iterator(MI)),
4173                   BB->end());
4174   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4175
4176   // thisMBB:
4177   // ...
4178   // fallthrough --> loop1MBB
4179   BB->addSuccessor(loop1MBB);
4180
4181   // loop1MBB:
4182   //   ldrex dest, [ptr]
4183   //   cmp dest, oldval
4184   //   bne exitMBB
4185   BB = loop1MBB;
4186   AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4187   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
4188                  .addReg(dest).addReg(oldval));
4189   BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4190     .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4191   BB->addSuccessor(loop2MBB);
4192   BB->addSuccessor(exitMBB);
4193
4194   // loop2MBB:
4195   //   strex scratch, newval, [ptr]
4196   //   cmp scratch, #0
4197   //   bne loop1MBB
4198   BB = loop2MBB;
4199   AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
4200                  .addReg(ptr));
4201   AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2CMPri : ARM::CMPri)) 4202 .addReg(scratch).addImm(0)); 4203 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4204 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4205 BB->addSuccessor(loop1MBB); 4206 BB->addSuccessor(exitMBB); 4207 4208 // exitMBB: 4209 // ... 4210 BB = exitMBB; 4211 4212 MI->eraseFromParent(); // The instruction is gone now. 4213 4214 return BB; 4215} 4216 4217MachineBasicBlock * 4218ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4219 unsigned Size, unsigned BinOpcode) const { 4220 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4221 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4222 4223 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4224 MachineFunction *MF = BB->getParent(); 4225 MachineFunction::iterator It = BB; 4226 ++It; 4227 4228 unsigned dest = MI->getOperand(0).getReg(); 4229 unsigned ptr = MI->getOperand(1).getReg(); 4230 unsigned incr = MI->getOperand(2).getReg(); 4231 DebugLoc dl = MI->getDebugLoc(); 4232 4233 bool isThumb2 = Subtarget->isThumb2(); 4234 unsigned ldrOpc, strOpc; 4235 switch (Size) { 4236 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4237 case 1: 4238 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4239 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4240 break; 4241 case 2: 4242 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4243 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4244 break; 4245 case 4: 4246 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4247 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4248 break; 4249 } 4250 4251 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4252 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4253 MF->insert(It, loopMBB); 4254 MF->insert(It, exitMBB); 4255 4256 // Transfer the remainder of BB and its successor edges to exitMBB. 4257 exitMBB->splice(exitMBB->begin(), BB, 4258 llvm::next(MachineBasicBlock::iterator(MI)), 4259 BB->end()); 4260 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4261 4262 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4263 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4264 unsigned scratch2 = (!BinOpcode) ? incr : 4265 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4266 4267 // thisMBB: 4268 // ... 4269 // fallthrough --> loopMBB 4270 BB->addSuccessor(loopMBB); 4271 4272 // loopMBB: 4273 // ldrex dest, ptr 4274 // <binop> scratch2, dest, incr 4275 // strex scratch, scratch2, ptr 4276 // cmp scratch, #0 4277 // bne- loopMBB 4278 // fallthrough --> exitMBB 4279 BB = loopMBB; 4280 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4281 if (BinOpcode) { 4282 // operand order needs to go the other way for NAND 4283 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 4284 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4285 addReg(incr).addReg(dest)).addReg(0); 4286 else 4287 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4288 addReg(dest).addReg(incr)).addReg(0); 4289 } 4290 4291 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4292 .addReg(ptr)); 4293 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4294 .addReg(scratch).addImm(0)); 4295 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4296 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4297 4298 BB->addSuccessor(loopMBB); 4299 BB->addSuccessor(exitMBB); 4300 4301 // exitMBB: 4302 // ... 
4303 BB = exitMBB; 4304 4305 MI->eraseFromParent(); // The instruction is gone now. 4306 4307 return BB; 4308} 4309 4310static 4311MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4312 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4313 E = MBB->succ_end(); I != E; ++I) 4314 if (*I != Succ) 4315 return *I; 4316 llvm_unreachable("Expecting a BB with two successors!"); 4317} 4318 4319MachineBasicBlock * 4320ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4321 MachineBasicBlock *BB) const { 4322 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4323 DebugLoc dl = MI->getDebugLoc(); 4324 bool isThumb2 = Subtarget->isThumb2(); 4325 switch (MI->getOpcode()) { 4326 default: 4327 MI->dump(); 4328 llvm_unreachable("Unexpected instr type to insert"); 4329 4330 case ARM::ATOMIC_LOAD_ADD_I8: 4331 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4332 case ARM::ATOMIC_LOAD_ADD_I16: 4333 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4334 case ARM::ATOMIC_LOAD_ADD_I32: 4335 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4336 4337 case ARM::ATOMIC_LOAD_AND_I8: 4338 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4339 case ARM::ATOMIC_LOAD_AND_I16: 4340 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4341 case ARM::ATOMIC_LOAD_AND_I32: 4342 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4343 4344 case ARM::ATOMIC_LOAD_OR_I8: 4345 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4346 case ARM::ATOMIC_LOAD_OR_I16: 4347 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4348 case ARM::ATOMIC_LOAD_OR_I32: 4349 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4350 4351 case ARM::ATOMIC_LOAD_XOR_I8: 4352 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4353 case ARM::ATOMIC_LOAD_XOR_I16: 4354 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4355 case ARM::ATOMIC_LOAD_XOR_I32: 4356 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4357 4358 case ARM::ATOMIC_LOAD_NAND_I8: 4359 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4360 case ARM::ATOMIC_LOAD_NAND_I16: 4361 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4362 case ARM::ATOMIC_LOAD_NAND_I32: 4363 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4364 4365 case ARM::ATOMIC_LOAD_SUB_I8: 4366 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4367 case ARM::ATOMIC_LOAD_SUB_I16: 4368 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4369 case ARM::ATOMIC_LOAD_SUB_I32: 4370 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4371 4372 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4373 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4374 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4375 4376 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4377 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4378 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4379 4380 case ARM::tMOVCCr_pseudo: { 4381 // To "insert" a SELECT_CC instruction, we actually have to insert the 4382 // diamond control-flow pattern. 
The incoming instruction knows the 4383 // destination vreg to set, the condition code register to branch on, the 4384 // true/false values to select between, and a branch opcode to use. 4385 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4386 MachineFunction::iterator It = BB; 4387 ++It; 4388 4389 // thisMBB: 4390 // ... 4391 // TrueVal = ... 4392 // cmpTY ccX, r1, r2 4393 // bCC copy1MBB 4394 // fallthrough --> copy0MBB 4395 MachineBasicBlock *thisMBB = BB; 4396 MachineFunction *F = BB->getParent(); 4397 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4398 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4399 F->insert(It, copy0MBB); 4400 F->insert(It, sinkMBB); 4401 4402 // Transfer the remainder of BB and its successor edges to sinkMBB. 4403 sinkMBB->splice(sinkMBB->begin(), BB, 4404 llvm::next(MachineBasicBlock::iterator(MI)), 4405 BB->end()); 4406 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4407 4408 BB->addSuccessor(copy0MBB); 4409 BB->addSuccessor(sinkMBB); 4410 4411 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4412 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4413 4414 // copy0MBB: 4415 // %FalseValue = ... 4416 // # fallthrough to sinkMBB 4417 BB = copy0MBB; 4418 4419 // Update machine-CFG edges 4420 BB->addSuccessor(sinkMBB); 4421 4422 // sinkMBB: 4423 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4424 // ... 4425 BB = sinkMBB; 4426 BuildMI(*BB, BB->begin(), dl, 4427 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4428 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4429 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4430 4431 MI->eraseFromParent(); // The pseudo instruction is gone now. 4432 return BB; 4433 } 4434 4435 case ARM::BCCi64: 4436 case ARM::BCCZi64: { 4437 // If there is an unconditional branch to the other successor, remove it. 4438 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 4439 4440 // Compare both parts that make up the double comparison separately for 4441 // equality. 4442 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4443 4444 unsigned LHS1 = MI->getOperand(1).getReg(); 4445 unsigned LHS2 = MI->getOperand(2).getReg(); 4446 if (RHSisZero) { 4447 AddDefaultPred(BuildMI(BB, dl, 4448 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4449 .addReg(LHS1).addImm(0)); 4450 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4451 .addReg(LHS2).addImm(0) 4452 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4453 } else { 4454 unsigned RHS1 = MI->getOperand(3).getReg(); 4455 unsigned RHS2 = MI->getOperand(4).getReg(); 4456 AddDefaultPred(BuildMI(BB, dl, 4457 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4458 .addReg(LHS1).addReg(RHS1)); 4459 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4460 .addReg(LHS2).addReg(RHS2) 4461 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4462 } 4463 4464 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 4465 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4466 if (MI->getOperand(0).getImm() == ARMCC::NE) 4467 std::swap(destMBB, exitMBB); 4468 4469 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4470 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4471 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4472 .addMBB(exitMBB); 4473 4474 MI->eraseFromParent(); // The pseudo instruction is gone now. 
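  // Roughly, the sequence built above for a BCCZi64 in ARM mode looks like
  // (register and block names are placeholders, not actual output):
  //   cmp   lhs1, #0
  //   cmpeq lhs2, #0        @ second compare predicated on EQ
  //   beq   destMBB         @ taken only when both halves are zero
  //   b     exitMBB
  // For an ARMCC::NE pseudo, destMBB and exitMBB were swapped before the
  // branches were emitted, so the EQ-conditional branch targets the other
  // successor instead.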
4475 return BB; 4476 } 4477 } 4478} 4479 4480//===----------------------------------------------------------------------===// 4481// ARM Optimization Hooks 4482//===----------------------------------------------------------------------===// 4483 4484static 4485SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 4486 TargetLowering::DAGCombinerInfo &DCI) { 4487 SelectionDAG &DAG = DCI.DAG; 4488 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4489 EVT VT = N->getValueType(0); 4490 unsigned Opc = N->getOpcode(); 4491 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 4492 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 4493 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 4494 ISD::CondCode CC = ISD::SETCC_INVALID; 4495 4496 if (isSlctCC) { 4497 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 4498 } else { 4499 SDValue CCOp = Slct.getOperand(0); 4500 if (CCOp.getOpcode() == ISD::SETCC) 4501 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 4502 } 4503 4504 bool DoXform = false; 4505 bool InvCC = false; 4506 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 4507 "Bad input!"); 4508 4509 if (LHS.getOpcode() == ISD::Constant && 4510 cast<ConstantSDNode>(LHS)->isNullValue()) { 4511 DoXform = true; 4512 } else if (CC != ISD::SETCC_INVALID && 4513 RHS.getOpcode() == ISD::Constant && 4514 cast<ConstantSDNode>(RHS)->isNullValue()) { 4515 std::swap(LHS, RHS); 4516 SDValue Op0 = Slct.getOperand(0); 4517 EVT OpVT = isSlctCC ? Op0.getValueType() : 4518 Op0.getOperand(0).getValueType(); 4519 bool isInt = OpVT.isInteger(); 4520 CC = ISD::getSetCCInverse(CC, isInt); 4521 4522 if (!TLI.isCondCodeLegal(CC, OpVT)) 4523 return SDValue(); // Inverse operator isn't legal. 4524 4525 DoXform = true; 4526 InvCC = true; 4527 } 4528 4529 if (DoXform) { 4530 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 4531 if (isSlctCC) 4532 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 4533 Slct.getOperand(0), Slct.getOperand(1), CC); 4534 SDValue CCOp = Slct.getOperand(0); 4535 if (InvCC) 4536 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 4537 CCOp.getOperand(0), CCOp.getOperand(1), CC); 4538 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4539 CCOp, OtherOp, Result); 4540 } 4541 return SDValue(); 4542} 4543 4544/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 4545/// operands N0 and N1. This is a helper for PerformADDCombine that is 4546/// called with the default operands, and if that fails, with commuted 4547/// operands. 4548static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 4549 TargetLowering::DAGCombinerInfo &DCI) { 4550 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 4551 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 4552 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 4553 if (Result.getNode()) return Result; 4554 } 4555 return SDValue(); 4556} 4557 4558/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 4559/// 4560static SDValue PerformADDCombine(SDNode *N, 4561 TargetLowering::DAGCombinerInfo &DCI) { 4562 SDValue N0 = N->getOperand(0); 4563 SDValue N1 = N->getOperand(1); 4564 4565 // First try with the default operand order. 4566 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 4567 if (Result.getNode()) 4568 return Result; 4569 4570 // If that didn't work, try again with the operands commuted. 
4571 return PerformADDCombineWithOperands(N, N1, N0, DCI); 4572} 4573 4574/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 4575/// 4576static SDValue PerformSUBCombine(SDNode *N, 4577 TargetLowering::DAGCombinerInfo &DCI) { 4578 SDValue N0 = N->getOperand(0); 4579 SDValue N1 = N->getOperand(1); 4580 4581 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 4582 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 4583 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 4584 if (Result.getNode()) return Result; 4585 } 4586 4587 return SDValue(); 4588} 4589 4590static SDValue PerformMULCombine(SDNode *N, 4591 TargetLowering::DAGCombinerInfo &DCI, 4592 const ARMSubtarget *Subtarget) { 4593 SelectionDAG &DAG = DCI.DAG; 4594 4595 if (Subtarget->isThumb1Only()) 4596 return SDValue(); 4597 4598 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 4599 return SDValue(); 4600 4601 EVT VT = N->getValueType(0); 4602 if (VT != MVT::i32) 4603 return SDValue(); 4604 4605 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4606 if (!C) 4607 return SDValue(); 4608 4609 uint64_t MulAmt = C->getZExtValue(); 4610 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 4611 ShiftAmt = ShiftAmt & (32 - 1); 4612 SDValue V = N->getOperand(0); 4613 DebugLoc DL = N->getDebugLoc(); 4614 4615 SDValue Res; 4616 MulAmt >>= ShiftAmt; 4617 if (isPowerOf2_32(MulAmt - 1)) { 4618 // (mul x, 2^N + 1) => (add (shl x, N), x) 4619 Res = DAG.getNode(ISD::ADD, DL, VT, 4620 V, DAG.getNode(ISD::SHL, DL, VT, 4621 V, DAG.getConstant(Log2_32(MulAmt-1), 4622 MVT::i32))); 4623 } else if (isPowerOf2_32(MulAmt + 1)) { 4624 // (mul x, 2^N - 1) => (sub (shl x, N), x) 4625 Res = DAG.getNode(ISD::SUB, DL, VT, 4626 DAG.getNode(ISD::SHL, DL, VT, 4627 V, DAG.getConstant(Log2_32(MulAmt+1), 4628 MVT::i32)), 4629 V); 4630 } else 4631 return SDValue(); 4632 4633 if (ShiftAmt != 0) 4634 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 4635 DAG.getConstant(ShiftAmt, MVT::i32)); 4636 4637 // Do not add new nodes to DAG combiner worklist. 
4638 DCI.CombineTo(N, Res, false); 4639 return SDValue(); 4640} 4641 4642static SDValue PerformANDCombine(SDNode *N, 4643 TargetLowering::DAGCombinerInfo &DCI) { 4644 // Attempt to use immediate-form VBIC 4645 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 4646 DebugLoc dl = N->getDebugLoc(); 4647 EVT VT = N->getValueType(0); 4648 SelectionDAG &DAG = DCI.DAG; 4649 4650 APInt SplatBits, SplatUndef; 4651 unsigned SplatBitSize; 4652 bool HasAnyUndefs; 4653 if (BVN && 4654 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4655 if (SplatBitSize <= 64) { 4656 EVT VbicVT; 4657 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 4658 SplatUndef.getZExtValue(), SplatBitSize, 4659 DAG, VbicVT, VT.is128BitVector(), 4660 OtherModImm); 4661 if (Val.getNode()) { 4662 SDValue Input = 4663 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 4664 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 4665 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 4666 } 4667 } 4668 } 4669 4670 return SDValue(); 4671} 4672 4673/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 4674static SDValue PerformORCombine(SDNode *N, 4675 TargetLowering::DAGCombinerInfo &DCI, 4676 const ARMSubtarget *Subtarget) { 4677 // Attempt to use immediate-form VORR 4678 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 4679 DebugLoc dl = N->getDebugLoc(); 4680 EVT VT = N->getValueType(0); 4681 SelectionDAG &DAG = DCI.DAG; 4682 4683 APInt SplatBits, SplatUndef; 4684 unsigned SplatBitSize; 4685 bool HasAnyUndefs; 4686 if (BVN && Subtarget->hasNEON() && 4687 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4688 if (SplatBitSize <= 64) { 4689 EVT VorrVT; 4690 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4691 SplatUndef.getZExtValue(), SplatBitSize, 4692 DAG, VorrVT, VT.is128BitVector(), 4693 OtherModImm); 4694 if (Val.getNode()) { 4695 SDValue Input = 4696 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 4697 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 4698 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 4699 } 4700 } 4701 } 4702 4703 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 4704 // reasonable. 4705 4706 // BFI is only available on V6T2+ 4707 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 4708 return SDValue(); 4709 4710 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 4711 DebugLoc DL = N->getDebugLoc(); 4712 // 1) or (and A, mask), val => ARMbfi A, val, mask 4713 // iff (val & mask) == val 4714 // 4715 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4716 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 4717 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4718 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 4719 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4720 // (i.e., copy a bitfield value into another bitfield of the same width) 4721 if (N0.getOpcode() != ISD::AND) 4722 return SDValue(); 4723 4724 if (VT != MVT::i32) 4725 return SDValue(); 4726 4727 SDValue N00 = N0.getOperand(0); 4728 4729 // The value and the mask need to be constants so we can verify this is 4730 // actually a bitfield set. If the mask is 0xffff, we can do better 4731 // via a movt instruction, so don't use BFI in that case. 
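  // As a concrete illustration of case (1) below (values chosen only for the
  // example): with N0 = (and A, 0xffff00ff) and N1 = 0x00001200, the mask is
  // 0xffff00ff, ~mask = 0x0000ff00 is a contiguous bitfield, and all set bits
  // of the constant lie inside that cleared field, so the node is rewritten as
  //   ARMbfi A, #0x12, #0xffff00ff
  // after shifting the value right by CountTrailingZeros_32(~mask) = 8.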
4732 SDValue MaskOp = N0.getOperand(1); 4733 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 4734 if (!MaskC) 4735 return SDValue(); 4736 unsigned Mask = MaskC->getZExtValue(); 4737 if (Mask == 0xffff) 4738 return SDValue(); 4739 SDValue Res; 4740 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 4741 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4742 if (N1C) { 4743 unsigned Val = N1C->getZExtValue(); 4744 if ((Val & ~Mask) != Val) 4745 return SDValue(); 4746 4747 if (ARM::isBitFieldInvertedMask(Mask)) { 4748 Val >>= CountTrailingZeros_32(~Mask); 4749 4750 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 4751 DAG.getConstant(Val, MVT::i32), 4752 DAG.getConstant(Mask, MVT::i32)); 4753 4754 // Do not add new nodes to DAG combiner worklist. 4755 DCI.CombineTo(N, Res, false); 4756 return SDValue(); 4757 } 4758 } else if (N1.getOpcode() == ISD::AND) { 4759 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4760 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4761 if (!N11C) 4762 return SDValue(); 4763 unsigned Mask2 = N11C->getZExtValue(); 4764 4765 if (ARM::isBitFieldInvertedMask(Mask) && 4766 ARM::isBitFieldInvertedMask(~Mask2) && 4767 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) { 4768 // The pack halfword instruction works better for masks that fit it, 4769 // so use that when it's available. 4770 if (Subtarget->hasT2ExtractPack() && 4771 (Mask == 0xffff || Mask == 0xffff0000)) 4772 return SDValue(); 4773 // 2a 4774 unsigned lsb = CountTrailingZeros_32(Mask2); 4775 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 4776 DAG.getConstant(lsb, MVT::i32)); 4777 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 4778 DAG.getConstant(Mask, MVT::i32)); 4779 // Do not add new nodes to DAG combiner worklist. 4780 DCI.CombineTo(N, Res, false); 4781 return SDValue(); 4782 } else if (ARM::isBitFieldInvertedMask(~Mask) && 4783 ARM::isBitFieldInvertedMask(Mask2) && 4784 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) { 4785 // The pack halfword instruction works better for masks that fit it, 4786 // so use that when it's available. 4787 if (Subtarget->hasT2ExtractPack() && 4788 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 4789 return SDValue(); 4790 // 2b 4791 unsigned lsb = CountTrailingZeros_32(Mask); 4792 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 4793 DAG.getConstant(lsb, MVT::i32)); 4794 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 4795 DAG.getConstant(Mask2, MVT::i32)); 4796 // Do not add new nodes to DAG combiner worklist. 4797 DCI.CombineTo(N, Res, false); 4798 return SDValue(); 4799 } 4800 } 4801 4802 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 4803 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 4804 ARM::isBitFieldInvertedMask(~Mask)) { 4805 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 4806 // where lsb(mask) == #shamt and masked bits of B are known zero. 4807 SDValue ShAmt = N00.getOperand(1); 4808 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 4809 unsigned LSB = CountTrailingZeros_32(Mask); 4810 if (ShAmtC != LSB) 4811 return SDValue(); 4812 4813 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 4814 DAG.getConstant(~Mask, MVT::i32)); 4815 4816 // Do not add new nodes to DAG combiner worklist. 4817 DCI.CombineTo(N, Res, false); 4818 } 4819 4820 return SDValue(); 4821} 4822 4823/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 4824/// C1 & C2 == C1. 
4825static SDValue PerformBFICombine(SDNode *N, 4826 TargetLowering::DAGCombinerInfo &DCI) { 4827 SDValue N1 = N->getOperand(1); 4828 if (N1.getOpcode() == ISD::AND) { 4829 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4830 if (!N11C) 4831 return SDValue(); 4832 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 4833 unsigned Mask2 = N11C->getZExtValue(); 4834 if ((Mask & Mask2) == Mask2) 4835 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 4836 N->getOperand(0), N1.getOperand(0), 4837 N->getOperand(2)); 4838 } 4839 return SDValue(); 4840} 4841 4842/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 4843/// ARMISD::VMOVRRD. 4844static SDValue PerformVMOVRRDCombine(SDNode *N, 4845 TargetLowering::DAGCombinerInfo &DCI) { 4846 // vmovrrd(vmovdrr x, y) -> x,y 4847 SDValue InDouble = N->getOperand(0); 4848 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 4849 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 4850 return SDValue(); 4851} 4852 4853/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 4854/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 4855static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 4856 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 4857 SDValue Op0 = N->getOperand(0); 4858 SDValue Op1 = N->getOperand(1); 4859 if (Op0.getOpcode() == ISD::BITCAST) 4860 Op0 = Op0.getOperand(0); 4861 if (Op1.getOpcode() == ISD::BITCAST) 4862 Op1 = Op1.getOperand(0); 4863 if (Op0.getOpcode() == ARMISD::VMOVRRD && 4864 Op0.getNode() == Op1.getNode() && 4865 Op0.getResNo() == 0 && Op1.getResNo() == 1) 4866 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 4867 N->getValueType(0), Op0.getOperand(0)); 4868 return SDValue(); 4869} 4870 4871/// PerformSTORECombine - Target-specific dag combine xforms for 4872/// ISD::STORE. 4873static SDValue PerformSTORECombine(SDNode *N, 4874 TargetLowering::DAGCombinerInfo &DCI) { 4875 // Bitcast an i64 store extracted from a vector to f64. 4876 // Otherwise, the i64 value will be legalized to a pair of i32 values. 4877 StoreSDNode *St = cast<StoreSDNode>(N); 4878 SDValue StVal = St->getValue(); 4879 if (!ISD::isNormalStore(St) || St->isVolatile() || 4880 StVal.getValueType() != MVT::i64 || 4881 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 4882 return SDValue(); 4883 4884 SelectionDAG &DAG = DCI.DAG; 4885 DebugLoc dl = StVal.getDebugLoc(); 4886 SDValue IntVec = StVal.getOperand(0); 4887 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 4888 IntVec.getValueType().getVectorNumElements()); 4889 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 4890 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 4891 Vec, StVal.getOperand(1)); 4892 dl = N->getDebugLoc(); 4893 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 4894 // Make the DAGCombiner fold the bitcasts. 4895 DCI.AddToWorklist(Vec.getNode()); 4896 DCI.AddToWorklist(ExtElt.getNode()); 4897 DCI.AddToWorklist(V.getNode()); 4898 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 4899 St->getPointerInfo(), St->isVolatile(), 4900 St->isNonTemporal(), St->getAlignment(), 4901 St->getTBAAInfo()); 4902} 4903 4904/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 4905/// are normal, non-volatile loads. If so, it is profitable to bitcast an 4906/// i64 vector to have f64 elements, since the value can then be loaded 4907/// directly into a VFP register. 
4908static bool hasNormalLoadOperand(SDNode *N) { 4909 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 4910 for (unsigned i = 0; i < NumElts; ++i) { 4911 SDNode *Elt = N->getOperand(i).getNode(); 4912 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 4913 return true; 4914 } 4915 return false; 4916} 4917 4918/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 4919/// ISD::BUILD_VECTOR. 4920static SDValue PerformBUILD_VECTORCombine(SDNode *N, 4921 TargetLowering::DAGCombinerInfo &DCI){ 4922 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 4923 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 4924 // into a pair of GPRs, which is fine when the value is used as a scalar, 4925 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 4926 SelectionDAG &DAG = DCI.DAG; 4927 if (N->getNumOperands() == 2) { 4928 SDValue RV = PerformVMOVDRRCombine(N, DAG); 4929 if (RV.getNode()) 4930 return RV; 4931 } 4932 4933 // Load i64 elements as f64 values so that type legalization does not split 4934 // them up into i32 values. 4935 EVT VT = N->getValueType(0); 4936 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 4937 return SDValue(); 4938 DebugLoc dl = N->getDebugLoc(); 4939 SmallVector<SDValue, 8> Ops; 4940 unsigned NumElts = VT.getVectorNumElements(); 4941 for (unsigned i = 0; i < NumElts; ++i) { 4942 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 4943 Ops.push_back(V); 4944 // Make the DAGCombiner fold the bitcast. 4945 DCI.AddToWorklist(V.getNode()); 4946 } 4947 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 4948 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 4949 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 4950} 4951 4952/// PerformInsertEltCombine - Target-specific dag combine xforms for 4953/// ISD::INSERT_VECTOR_ELT. 4954static SDValue PerformInsertEltCombine(SDNode *N, 4955 TargetLowering::DAGCombinerInfo &DCI) { 4956 // Bitcast an i64 load inserted into a vector to f64. 4957 // Otherwise, the i64 value will be legalized to a pair of i32 values. 4958 EVT VT = N->getValueType(0); 4959 SDNode *Elt = N->getOperand(1).getNode(); 4960 if (VT.getVectorElementType() != MVT::i64 || 4961 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 4962 return SDValue(); 4963 4964 SelectionDAG &DAG = DCI.DAG; 4965 DebugLoc dl = N->getDebugLoc(); 4966 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 4967 VT.getVectorNumElements()); 4968 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 4969 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 4970 // Make the DAGCombiner fold the bitcasts. 4971 DCI.AddToWorklist(Vec.getNode()); 4972 DCI.AddToWorklist(V.getNode()); 4973 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 4974 Vec, V, N->getOperand(2)); 4975 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 4976} 4977 4978/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 4979/// ISD::VECTOR_SHUFFLE. 4980static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 4981 // The LLVM shufflevector instruction does not require the shuffle mask 4982 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 4983 // have that requirement. 
When translating to ISD::VECTOR_SHUFFLE, if the 4984 // operands do not match the mask length, they are extended by concatenating 4985 // them with undef vectors. That is probably the right thing for other 4986 // targets, but for NEON it is better to concatenate two double-register 4987 // size vector operands into a single quad-register size vector. Do that 4988 // transformation here: 4989 // shuffle(concat(v1, undef), concat(v2, undef)) -> 4990 // shuffle(concat(v1, v2), undef) 4991 SDValue Op0 = N->getOperand(0); 4992 SDValue Op1 = N->getOperand(1); 4993 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 4994 Op1.getOpcode() != ISD::CONCAT_VECTORS || 4995 Op0.getNumOperands() != 2 || 4996 Op1.getNumOperands() != 2) 4997 return SDValue(); 4998 SDValue Concat0Op1 = Op0.getOperand(1); 4999 SDValue Concat1Op1 = Op1.getOperand(1); 5000 if (Concat0Op1.getOpcode() != ISD::UNDEF || 5001 Concat1Op1.getOpcode() != ISD::UNDEF) 5002 return SDValue(); 5003 // Skip the transformation if any of the types are illegal. 5004 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5005 EVT VT = N->getValueType(0); 5006 if (!TLI.isTypeLegal(VT) || 5007 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 5008 !TLI.isTypeLegal(Concat1Op1.getValueType())) 5009 return SDValue(); 5010 5011 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 5012 Op0.getOperand(0), Op1.getOperand(0)); 5013 // Translate the shuffle mask. 5014 SmallVector<int, 16> NewMask; 5015 unsigned NumElts = VT.getVectorNumElements(); 5016 unsigned HalfElts = NumElts/2; 5017 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 5018 for (unsigned n = 0; n < NumElts; ++n) { 5019 int MaskElt = SVN->getMaskElt(n); 5020 int NewElt = -1; 5021 if (MaskElt < (int)HalfElts) 5022 NewElt = MaskElt; 5023 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 5024 NewElt = HalfElts + MaskElt - NumElts; 5025 NewMask.push_back(NewElt); 5026 } 5027 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 5028 DAG.getUNDEF(VT), NewMask.data()); 5029} 5030 5031/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 5032/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 5033/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 5034/// return true. 5035static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 5036 SelectionDAG &DAG = DCI.DAG; 5037 EVT VT = N->getValueType(0); 5038 // vldN-dup instructions only support 64-bit vectors for N > 1. 5039 if (!VT.is64BitVector()) 5040 return false; 5041 5042 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 5043 SDNode *VLD = N->getOperand(0).getNode(); 5044 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 5045 return false; 5046 unsigned NumVecs = 0; 5047 unsigned NewOpc = 0; 5048 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 5049 if (IntNo == Intrinsic::arm_neon_vld2lane) { 5050 NumVecs = 2; 5051 NewOpc = ARMISD::VLD2DUP; 5052 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 5053 NumVecs = 3; 5054 NewOpc = ARMISD::VLD3DUP; 5055 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 5056 NumVecs = 4; 5057 NewOpc = ARMISD::VLD4DUP; 5058 } else { 5059 return false; 5060 } 5061 5062 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 5063 // numbers match the load. 
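  // Sketch of the intended result (assembly shown for illustration only): a
  // vld2lane whose vector results feed only VDUPLANEs of the same lane,
  //   vld2.32 {d16[1], d17[1]}, [r0]
  //   vdup.32 d18, d16[1]
  //   vdup.32 d19, d17[1]
  // can be replaced with a single all-lanes load:
  //   vld2.32 {d16[], d17[]}, [r0]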
5064 unsigned VLDLaneNo = 5065 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 5066 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5067 UI != UE; ++UI) { 5068 // Ignore uses of the chain result. 5069 if (UI.getUse().getResNo() == NumVecs) 5070 continue; 5071 SDNode *User = *UI; 5072 if (User->getOpcode() != ARMISD::VDUPLANE || 5073 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 5074 return false; 5075 } 5076 5077 // Create the vldN-dup node. 5078 EVT Tys[5]; 5079 unsigned n; 5080 for (n = 0; n < NumVecs; ++n) 5081 Tys[n] = VT; 5082 Tys[n] = MVT::Other; 5083 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 5084 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 5085 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 5086 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 5087 Ops, 2, VLDMemInt->getMemoryVT(), 5088 VLDMemInt->getMemOperand()); 5089 5090 // Update the uses. 5091 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5092 UI != UE; ++UI) { 5093 unsigned ResNo = UI.getUse().getResNo(); 5094 // Ignore uses of the chain result. 5095 if (ResNo == NumVecs) 5096 continue; 5097 SDNode *User = *UI; 5098 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 5099 } 5100 5101 // Now the vldN-lane intrinsic is dead except for its chain result. 5102 // Update uses of the chain. 5103 std::vector<SDValue> VLDDupResults; 5104 for (unsigned n = 0; n < NumVecs; ++n) 5105 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 5106 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 5107 DCI.CombineTo(VLD, VLDDupResults); 5108 5109 return true; 5110} 5111 5112/// PerformVDUPLANECombine - Target-specific dag combine xforms for 5113/// ARMISD::VDUPLANE. 5114static SDValue PerformVDUPLANECombine(SDNode *N, 5115 TargetLowering::DAGCombinerInfo &DCI) { 5116 SDValue Op = N->getOperand(0); 5117 5118 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 5119 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 5120 if (CombineVLDDUP(N, DCI)) 5121 return SDValue(N, 0); 5122 5123 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 5124 // redundant. Ignore bit_converts for now; element sizes are checked below. 5125 while (Op.getOpcode() == ISD::BITCAST) 5126 Op = Op.getOperand(0); 5127 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 5128 return SDValue(); 5129 5130 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 5131 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 5132 // The canonical VMOV for a zero vector uses a 32-bit element size. 5133 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5134 unsigned EltBits; 5135 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 5136 EltSize = 8; 5137 EVT VT = N->getValueType(0); 5138 if (EltSize > VT.getVectorElementType().getSizeInBits()) 5139 return SDValue(); 5140 5141 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 5142} 5143 5144/// getVShiftImm - Check if this is a valid build_vector for the immediate 5145/// operand of a vector shift operation, where all the elements of the 5146/// build_vector must have the same constant integer value. 5147static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 5148 // Ignore bit_converts. 
5149 while (Op.getOpcode() == ISD::BITCAST) 5150 Op = Op.getOperand(0); 5151 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5152 APInt SplatBits, SplatUndef; 5153 unsigned SplatBitSize; 5154 bool HasAnyUndefs; 5155 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 5156 HasAnyUndefs, ElementBits) || 5157 SplatBitSize > ElementBits) 5158 return false; 5159 Cnt = SplatBits.getSExtValue(); 5160 return true; 5161} 5162 5163/// isVShiftLImm - Check if this is a valid build_vector for the immediate 5164/// operand of a vector shift left operation. That value must be in the range: 5165/// 0 <= Value < ElementBits for a left shift; or 5166/// 0 <= Value <= ElementBits for a long left shift. 5167static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 5168 assert(VT.isVector() && "vector shift count is not a vector type"); 5169 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5170 if (! getVShiftImm(Op, ElementBits, Cnt)) 5171 return false; 5172 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 5173} 5174 5175/// isVShiftRImm - Check if this is a valid build_vector for the immediate 5176/// operand of a vector shift right operation. For a shift opcode, the value 5177/// is positive, but for an intrinsic the value count must be negative. The 5178/// absolute value must be in the range: 5179/// 1 <= |Value| <= ElementBits for a right shift; or 5180/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 5181static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 5182 int64_t &Cnt) { 5183 assert(VT.isVector() && "vector shift count is not a vector type"); 5184 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 5185 if (! getVShiftImm(Op, ElementBits, Cnt)) 5186 return false; 5187 if (isIntrinsic) 5188 Cnt = -Cnt; 5189 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 5190} 5191 5192/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 5193static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 5194 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 5195 switch (IntNo) { 5196 default: 5197 // Don't do anything for most intrinsics. 5198 break; 5199 5200 // Vector shifts: check for immediate versions and lower them. 5201 // Note: This is done during DAG combining instead of DAG legalizing because 5202 // the build_vectors for 64-bit vector element shift counts are generally 5203 // not legal, and it is hard to see their values after they get legalized to 5204 // loads from a constant pool. 
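  // For example (an illustrative sketch): an IR call such as
  //   %r = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %a,
  //                          <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  // arrives here with a constant-splat shift amount and is rewritten below as
  //   (ARMISD::VSHL %a, Constant:i32<3>)
  // which then selects to the immediate form of the shift.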
5205 case Intrinsic::arm_neon_vshifts: 5206 case Intrinsic::arm_neon_vshiftu: 5207 case Intrinsic::arm_neon_vshiftls: 5208 case Intrinsic::arm_neon_vshiftlu: 5209 case Intrinsic::arm_neon_vshiftn: 5210 case Intrinsic::arm_neon_vrshifts: 5211 case Intrinsic::arm_neon_vrshiftu: 5212 case Intrinsic::arm_neon_vrshiftn: 5213 case Intrinsic::arm_neon_vqshifts: 5214 case Intrinsic::arm_neon_vqshiftu: 5215 case Intrinsic::arm_neon_vqshiftsu: 5216 case Intrinsic::arm_neon_vqshiftns: 5217 case Intrinsic::arm_neon_vqshiftnu: 5218 case Intrinsic::arm_neon_vqshiftnsu: 5219 case Intrinsic::arm_neon_vqrshiftns: 5220 case Intrinsic::arm_neon_vqrshiftnu: 5221 case Intrinsic::arm_neon_vqrshiftnsu: { 5222 EVT VT = N->getOperand(1).getValueType(); 5223 int64_t Cnt; 5224 unsigned VShiftOpc = 0; 5225 5226 switch (IntNo) { 5227 case Intrinsic::arm_neon_vshifts: 5228 case Intrinsic::arm_neon_vshiftu: 5229 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 5230 VShiftOpc = ARMISD::VSHL; 5231 break; 5232 } 5233 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 5234 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 5235 ARMISD::VSHRs : ARMISD::VSHRu); 5236 break; 5237 } 5238 return SDValue(); 5239 5240 case Intrinsic::arm_neon_vshiftls: 5241 case Intrinsic::arm_neon_vshiftlu: 5242 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 5243 break; 5244 llvm_unreachable("invalid shift count for vshll intrinsic"); 5245 5246 case Intrinsic::arm_neon_vrshifts: 5247 case Intrinsic::arm_neon_vrshiftu: 5248 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 5249 break; 5250 return SDValue(); 5251 5252 case Intrinsic::arm_neon_vqshifts: 5253 case Intrinsic::arm_neon_vqshiftu: 5254 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5255 break; 5256 return SDValue(); 5257 5258 case Intrinsic::arm_neon_vqshiftsu: 5259 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 5260 break; 5261 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 5262 5263 case Intrinsic::arm_neon_vshiftn: 5264 case Intrinsic::arm_neon_vrshiftn: 5265 case Intrinsic::arm_neon_vqshiftns: 5266 case Intrinsic::arm_neon_vqshiftnu: 5267 case Intrinsic::arm_neon_vqshiftnsu: 5268 case Intrinsic::arm_neon_vqrshiftns: 5269 case Intrinsic::arm_neon_vqrshiftnu: 5270 case Intrinsic::arm_neon_vqrshiftnsu: 5271 // Narrowing shifts require an immediate right shift. 5272 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 5273 break; 5274 llvm_unreachable("invalid shift count for narrowing vector shift " 5275 "intrinsic"); 5276 5277 default: 5278 llvm_unreachable("unhandled vector shift"); 5279 } 5280 5281 switch (IntNo) { 5282 case Intrinsic::arm_neon_vshifts: 5283 case Intrinsic::arm_neon_vshiftu: 5284 // Opcode already set above. 5285 break; 5286 case Intrinsic::arm_neon_vshiftls: 5287 case Intrinsic::arm_neon_vshiftlu: 5288 if (Cnt == VT.getVectorElementType().getSizeInBits()) 5289 VShiftOpc = ARMISD::VSHLLi; 5290 else 5291 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
5292 ARMISD::VSHLLs : ARMISD::VSHLLu); 5293 break; 5294 case Intrinsic::arm_neon_vshiftn: 5295 VShiftOpc = ARMISD::VSHRN; break; 5296 case Intrinsic::arm_neon_vrshifts: 5297 VShiftOpc = ARMISD::VRSHRs; break; 5298 case Intrinsic::arm_neon_vrshiftu: 5299 VShiftOpc = ARMISD::VRSHRu; break; 5300 case Intrinsic::arm_neon_vrshiftn: 5301 VShiftOpc = ARMISD::VRSHRN; break; 5302 case Intrinsic::arm_neon_vqshifts: 5303 VShiftOpc = ARMISD::VQSHLs; break; 5304 case Intrinsic::arm_neon_vqshiftu: 5305 VShiftOpc = ARMISD::VQSHLu; break; 5306 case Intrinsic::arm_neon_vqshiftsu: 5307 VShiftOpc = ARMISD::VQSHLsu; break; 5308 case Intrinsic::arm_neon_vqshiftns: 5309 VShiftOpc = ARMISD::VQSHRNs; break; 5310 case Intrinsic::arm_neon_vqshiftnu: 5311 VShiftOpc = ARMISD::VQSHRNu; break; 5312 case Intrinsic::arm_neon_vqshiftnsu: 5313 VShiftOpc = ARMISD::VQSHRNsu; break; 5314 case Intrinsic::arm_neon_vqrshiftns: 5315 VShiftOpc = ARMISD::VQRSHRNs; break; 5316 case Intrinsic::arm_neon_vqrshiftnu: 5317 VShiftOpc = ARMISD::VQRSHRNu; break; 5318 case Intrinsic::arm_neon_vqrshiftnsu: 5319 VShiftOpc = ARMISD::VQRSHRNsu; break; 5320 } 5321 5322 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5323 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 5324 } 5325 5326 case Intrinsic::arm_neon_vshiftins: { 5327 EVT VT = N->getOperand(1).getValueType(); 5328 int64_t Cnt; 5329 unsigned VShiftOpc = 0; 5330 5331 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 5332 VShiftOpc = ARMISD::VSLI; 5333 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 5334 VShiftOpc = ARMISD::VSRI; 5335 else { 5336 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 5337 } 5338 5339 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 5340 N->getOperand(1), N->getOperand(2), 5341 DAG.getConstant(Cnt, MVT::i32)); 5342 } 5343 5344 case Intrinsic::arm_neon_vqrshifts: 5345 case Intrinsic::arm_neon_vqrshiftu: 5346 // No immediate versions of these to check for. 5347 break; 5348 } 5349 5350 return SDValue(); 5351} 5352 5353/// PerformShiftCombine - Checks for immediate versions of vector shifts and 5354/// lowers them. As with the vector shift intrinsics, this is done during DAG 5355/// combining instead of DAG legalizing because the build_vectors for 64-bit 5356/// vector element shift counts are generally not legal, and it is hard to see 5357/// their values after they get legalized to loads from a constant pool. 5358static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 5359 const ARMSubtarget *ST) { 5360 EVT VT = N->getValueType(0); 5361 5362 // Nothing to be done for scalar shifts. 5363 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5364 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 5365 return SDValue(); 5366 5367 assert(ST->hasNEON() && "unexpected vector shift"); 5368 int64_t Cnt; 5369 5370 switch (N->getOpcode()) { 5371 default: llvm_unreachable("unexpected shift opcode"); 5372 5373 case ISD::SHL: 5374 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 5375 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 5376 DAG.getConstant(Cnt, MVT::i32)); 5377 break; 5378 5379 case ISD::SRA: 5380 case ISD::SRL: 5381 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 5382 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
5383 ARMISD::VSHRs : ARMISD::VSHRu); 5384 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 5385 DAG.getConstant(Cnt, MVT::i32)); 5386 } 5387 } 5388 return SDValue(); 5389} 5390 5391/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 5392/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 5393static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 5394 const ARMSubtarget *ST) { 5395 SDValue N0 = N->getOperand(0); 5396 5397 // Check for sign- and zero-extensions of vector extract operations of 8- 5398 // and 16-bit vector elements. NEON supports these directly. They are 5399 // handled during DAG combining because type legalization will promote them 5400 // to 32-bit types and it is messy to recognize the operations after that. 5401 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 5402 SDValue Vec = N0.getOperand(0); 5403 SDValue Lane = N0.getOperand(1); 5404 EVT VT = N->getValueType(0); 5405 EVT EltVT = N0.getValueType(); 5406 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5407 5408 if (VT == MVT::i32 && 5409 (EltVT == MVT::i8 || EltVT == MVT::i16) && 5410 TLI.isTypeLegal(Vec.getValueType()) && 5411 isa<ConstantSDNode>(Lane)) { 5412 5413 unsigned Opc = 0; 5414 switch (N->getOpcode()) { 5415 default: llvm_unreachable("unexpected opcode"); 5416 case ISD::SIGN_EXTEND: 5417 Opc = ARMISD::VGETLANEs; 5418 break; 5419 case ISD::ZERO_EXTEND: 5420 case ISD::ANY_EXTEND: 5421 Opc = ARMISD::VGETLANEu; 5422 break; 5423 } 5424 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 5425 } 5426 } 5427 5428 return SDValue(); 5429} 5430 5431/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 5432/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 5433static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 5434 const ARMSubtarget *ST) { 5435 // If the target supports NEON, try to use vmax/vmin instructions for f32 5436 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 5437 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 5438 // a NaN; only do the transformation when it matches that behavior. 5439 5440 // For now only do this when using NEON for FP operations; if using VFP, it 5441 // is not obvious that the benefit outweighs the cost of switching to the 5442 // NEON pipeline. 5443 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 5444 N->getValueType(0) != MVT::f32) 5445 return SDValue(); 5446 5447 SDValue CondLHS = N->getOperand(0); 5448 SDValue CondRHS = N->getOperand(1); 5449 SDValue LHS = N->getOperand(2); 5450 SDValue RHS = N->getOperand(3); 5451 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 5452 5453 unsigned Opcode = 0; 5454 bool IsReversed; 5455 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 5456 IsReversed = false; // x CC y ? x : y 5457 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 5458 IsReversed = true ; // x CC y ? y : x 5459 } else { 5460 return SDValue(); 5461 } 5462 5463 bool IsUnordered; 5464 switch (CC) { 5465 default: break; 5466 case ISD::SETOLT: 5467 case ISD::SETOLE: 5468 case ISD::SETLT: 5469 case ISD::SETLE: 5470 case ISD::SETULT: 5471 case ISD::SETULE: 5472 // If LHS is NaN, an ordered comparison will be false and the result will 5473 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 5474 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
5475 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 5476 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5477 break; 5478 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 5479 // will return -0, so vmin can only be used for unsafe math or if one of 5480 // the operands is known to be nonzero. 5481 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 5482 !UnsafeFPMath && 5483 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5484 break; 5485 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 5486 break; 5487 5488 case ISD::SETOGT: 5489 case ISD::SETOGE: 5490 case ISD::SETGT: 5491 case ISD::SETGE: 5492 case ISD::SETUGT: 5493 case ISD::SETUGE: 5494 // If LHS is NaN, an ordered comparison will be false and the result will 5495 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 5496 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 5497 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 5498 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 5499 break; 5500 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 5501 // will return +0, so vmax can only be used for unsafe math or if one of 5502 // the operands is known to be nonzero. 5503 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 5504 !UnsafeFPMath && 5505 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 5506 break; 5507 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 5508 break; 5509 } 5510 5511 if (!Opcode) 5512 return SDValue(); 5513 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 5514} 5515 5516SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 5517 DAGCombinerInfo &DCI) const { 5518 switch (N->getOpcode()) { 5519 default: break; 5520 case ISD::ADD: return PerformADDCombine(N, DCI); 5521 case ISD::SUB: return PerformSUBCombine(N, DCI); 5522 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 5523 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 5524 case ISD::AND: return PerformANDCombine(N, DCI); 5525 case ARMISD::BFI: return PerformBFICombine(N, DCI); 5526 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 5527 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 5528 case ISD::STORE: return PerformSTORECombine(N, DCI); 5529 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 5530 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 5531 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 5532 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 5533 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 5534 case ISD::SHL: 5535 case ISD::SRA: 5536 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 5537 case ISD::SIGN_EXTEND: 5538 case ISD::ZERO_EXTEND: 5539 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 5540 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 5541 } 5542 return SDValue(); 5543} 5544 5545bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 5546 if (!Subtarget->allowsUnalignedMem()) 5547 return false; 5548 5549 switch (VT.getSimpleVT().SimpleTy) { 5550 default: 5551 return false; 5552 case MVT::i8: 5553 case MVT::i16: 5554 case MVT::i32: 5555 return true; 5556 // FIXME: VLD1 etc with standard alignment is legal. 
5557 } 5558} 5559 5560static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 5561 if (V < 0) 5562 return false; 5563 5564 unsigned Scale = 1; 5565 switch (VT.getSimpleVT().SimpleTy) { 5566 default: return false; 5567 case MVT::i1: 5568 case MVT::i8: 5569 // Scale == 1; 5570 break; 5571 case MVT::i16: 5572 // Scale == 2; 5573 Scale = 2; 5574 break; 5575 case MVT::i32: 5576 // Scale == 4; 5577 Scale = 4; 5578 break; 5579 } 5580 5581 if ((V & (Scale - 1)) != 0) 5582 return false; 5583 V /= Scale; 5584 return V == (V & ((1LL << 5) - 1)); 5585} 5586 5587static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 5588 const ARMSubtarget *Subtarget) { 5589 bool isNeg = false; 5590 if (V < 0) { 5591 isNeg = true; 5592 V = - V; 5593 } 5594 5595 switch (VT.getSimpleVT().SimpleTy) { 5596 default: return false; 5597 case MVT::i1: 5598 case MVT::i8: 5599 case MVT::i16: 5600 case MVT::i32: 5601 // + imm12 or - imm8 5602 if (isNeg) 5603 return V == (V & ((1LL << 8) - 1)); 5604 return V == (V & ((1LL << 12) - 1)); 5605 case MVT::f32: 5606 case MVT::f64: 5607 // Same as ARM mode. FIXME: NEON? 5608 if (!Subtarget->hasVFP2()) 5609 return false; 5610 if ((V & 3) != 0) 5611 return false; 5612 V >>= 2; 5613 return V == (V & ((1LL << 8) - 1)); 5614 } 5615} 5616 5617/// isLegalAddressImmediate - Return true if the integer value can be used 5618/// as the offset of the target addressing mode for load / store of the 5619/// given type. 5620static bool isLegalAddressImmediate(int64_t V, EVT VT, 5621 const ARMSubtarget *Subtarget) { 5622 if (V == 0) 5623 return true; 5624 5625 if (!VT.isSimple()) 5626 return false; 5627 5628 if (Subtarget->isThumb1Only()) 5629 return isLegalT1AddressImmediate(V, VT); 5630 else if (Subtarget->isThumb2()) 5631 return isLegalT2AddressImmediate(V, VT, Subtarget); 5632 5633 // ARM mode. 5634 if (V < 0) 5635 V = - V; 5636 switch (VT.getSimpleVT().SimpleTy) { 5637 default: return false; 5638 case MVT::i1: 5639 case MVT::i8: 5640 case MVT::i32: 5641 // +- imm12 5642 return V == (V & ((1LL << 12) - 1)); 5643 case MVT::i16: 5644 // +- imm8 5645 return V == (V & ((1LL << 8) - 1)); 5646 case MVT::f32: 5647 case MVT::f64: 5648 if (!Subtarget->hasVFP2()) // FIXME: NEON? 5649 return false; 5650 if ((V & 3) != 0) 5651 return false; 5652 V >>= 2; 5653 return V == (V & ((1LL << 8) - 1)); 5654 } 5655} 5656 5657bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 5658 EVT VT) const { 5659 int Scale = AM.Scale; 5660 if (Scale < 0) 5661 return false; 5662 5663 switch (VT.getSimpleVT().SimpleTy) { 5664 default: return false; 5665 case MVT::i1: 5666 case MVT::i8: 5667 case MVT::i16: 5668 case MVT::i32: 5669 if (Scale == 1) 5670 return true; 5671 // r + r << imm 5672 Scale = Scale & ~1; 5673 return Scale == 2 || Scale == 4 || Scale == 8; 5674 case MVT::i64: 5675 // r + r 5676 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5677 return true; 5678 return false; 5679 case MVT::isVoid: 5680 // Note, we allow "void" uses (basically, uses that aren't loads or 5681 // stores), because arm allows folding a scale into many arithmetic 5682 // operations. This should be made more precise and revisited later. 5683 5684 // Allow r << imm, but the imm has to be a multiple of two. 5685 if (Scale & 1) return false; 5686 return isPowerOf2_32(Scale); 5687 } 5688} 5689 5690/// isLegalAddressingMode - Return true if the addressing mode represented 5691/// by AM is legal for this target, for a load/store of the specified type. 
5692bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5693 const Type *Ty) const { 5694 EVT VT = getValueType(Ty, true); 5695 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 5696 return false; 5697 5698 // Can never fold addr of global into load/store. 5699 if (AM.BaseGV) 5700 return false; 5701 5702 switch (AM.Scale) { 5703 case 0: // no scale reg, must be "r+i" or "r", or "i". 5704 break; 5705 case 1: 5706 if (Subtarget->isThumb1Only()) 5707 return false; 5708 // FALL THROUGH. 5709 default: 5710 // ARM doesn't support any R+R*scale+imm addr modes. 5711 if (AM.BaseOffs) 5712 return false; 5713 5714 if (!VT.isSimple()) 5715 return false; 5716 5717 if (Subtarget->isThumb2()) 5718 return isLegalT2ScaledAddressingMode(AM, VT); 5719 5720 int Scale = AM.Scale; 5721 switch (VT.getSimpleVT().SimpleTy) { 5722 default: return false; 5723 case MVT::i1: 5724 case MVT::i8: 5725 case MVT::i32: 5726 if (Scale < 0) Scale = -Scale; 5727 if (Scale == 1) 5728 return true; 5729 // r + r << imm 5730 return isPowerOf2_32(Scale & ~1); 5731 case MVT::i16: 5732 case MVT::i64: 5733 // r + r 5734 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5735 return true; 5736 return false; 5737 5738 case MVT::isVoid: 5739 // Note, we allow "void" uses (basically, uses that aren't loads or 5740 // stores), because arm allows folding a scale into many arithmetic 5741 // operations. This should be made more precise and revisited later. 5742 5743 // Allow r << imm, but the imm has to be a multiple of two. 5744 if (Scale & 1) return false; 5745 return isPowerOf2_32(Scale); 5746 } 5747 break; 5748 } 5749 return true; 5750} 5751 5752/// isLegalICmpImmediate - Return true if the specified immediate is legal 5753/// icmp immediate, that is the target has icmp instructions which can compare 5754/// a register against the immediate without having to materialize the 5755/// immediate into a register. 
5756bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 5757 if (!Subtarget->isThumb()) 5758 return ARM_AM::getSOImmVal(Imm) != -1; 5759 if (Subtarget->isThumb2()) 5760 return ARM_AM::getT2SOImmVal(Imm) != -1; 5761 return Imm >= 0 && Imm <= 255; 5762} 5763 5764static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 5765 bool isSEXTLoad, SDValue &Base, 5766 SDValue &Offset, bool &isInc, 5767 SelectionDAG &DAG) { 5768 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5769 return false; 5770 5771 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 5772 // AddressingMode 3 5773 Base = Ptr->getOperand(0); 5774 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5775 int RHSC = (int)RHS->getZExtValue(); 5776 if (RHSC < 0 && RHSC > -256) { 5777 assert(Ptr->getOpcode() == ISD::ADD); 5778 isInc = false; 5779 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5780 return true; 5781 } 5782 } 5783 isInc = (Ptr->getOpcode() == ISD::ADD); 5784 Offset = Ptr->getOperand(1); 5785 return true; 5786 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 5787 // AddressingMode 2 5788 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5789 int RHSC = (int)RHS->getZExtValue(); 5790 if (RHSC < 0 && RHSC > -0x1000) { 5791 assert(Ptr->getOpcode() == ISD::ADD); 5792 isInc = false; 5793 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5794 Base = Ptr->getOperand(0); 5795 return true; 5796 } 5797 } 5798 5799 if (Ptr->getOpcode() == ISD::ADD) { 5800 isInc = true; 5801 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 5802 if (ShOpcVal != ARM_AM::no_shift) { 5803 Base = Ptr->getOperand(1); 5804 Offset = Ptr->getOperand(0); 5805 } else { 5806 Base = Ptr->getOperand(0); 5807 Offset = Ptr->getOperand(1); 5808 } 5809 return true; 5810 } 5811 5812 isInc = (Ptr->getOpcode() == ISD::ADD); 5813 Base = Ptr->getOperand(0); 5814 Offset = Ptr->getOperand(1); 5815 return true; 5816 } 5817 5818 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 5819 return false; 5820} 5821 5822static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 5823 bool isSEXTLoad, SDValue &Base, 5824 SDValue &Offset, bool &isInc, 5825 SelectionDAG &DAG) { 5826 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5827 return false; 5828 5829 Base = Ptr->getOperand(0); 5830 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5831 int RHSC = (int)RHS->getZExtValue(); 5832 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 5833 assert(Ptr->getOpcode() == ISD::ADD); 5834 isInc = false; 5835 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5836 return true; 5837 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 5838 isInc = Ptr->getOpcode() == ISD::ADD; 5839 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 5840 return true; 5841 } 5842 } 5843 5844 return false; 5845} 5846 5847/// getPreIndexedAddressParts - returns true by value, base pointer and 5848/// offset pointer and addressing mode by reference if the node's address 5849/// can be legally represented as pre-indexed load / store address. 
5850bool 5851ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 5852 SDValue &Offset, 5853 ISD::MemIndexedMode &AM, 5854 SelectionDAG &DAG) const { 5855 if (Subtarget->isThumb1Only()) 5856 return false; 5857 5858 EVT VT; 5859 SDValue Ptr; 5860 bool isSEXTLoad = false; 5861 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5862 Ptr = LD->getBasePtr(); 5863 VT = LD->getMemoryVT(); 5864 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5865 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5866 Ptr = ST->getBasePtr(); 5867 VT = ST->getMemoryVT(); 5868 } else 5869 return false; 5870 5871 bool isInc; 5872 bool isLegal = false; 5873 if (Subtarget->isThumb2()) 5874 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5875 Offset, isInc, DAG); 5876 else 5877 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5878 Offset, isInc, DAG); 5879 if (!isLegal) 5880 return false; 5881 5882 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 5883 return true; 5884} 5885 5886/// getPostIndexedAddressParts - returns true by value, base pointer and 5887/// offset pointer and addressing mode by reference if this node can be 5888/// combined with a load / store to form a post-indexed load / store. 5889bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 5890 SDValue &Base, 5891 SDValue &Offset, 5892 ISD::MemIndexedMode &AM, 5893 SelectionDAG &DAG) const { 5894 if (Subtarget->isThumb1Only()) 5895 return false; 5896 5897 EVT VT; 5898 SDValue Ptr; 5899 bool isSEXTLoad = false; 5900 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5901 VT = LD->getMemoryVT(); 5902 Ptr = LD->getBasePtr(); 5903 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5904 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5905 VT = ST->getMemoryVT(); 5906 Ptr = ST->getBasePtr(); 5907 } else 5908 return false; 5909 5910 bool isInc; 5911 bool isLegal = false; 5912 if (Subtarget->isThumb2()) 5913 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5914 isInc, DAG); 5915 else 5916 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5917 isInc, DAG); 5918 if (!isLegal) 5919 return false; 5920 5921 if (Ptr != Base) { 5922 // Swap base ptr and offset to catch more post-index load / store when 5923 // it's legal. In Thumb2 mode, offset must be an immediate. 5924 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 5925 !Subtarget->isThumb2()) 5926 std::swap(Base, Offset); 5927 5928 // Post-indexed load / store update the base pointer. 5929 if (Ptr != Base) 5930 return false; 5931 } 5932 5933 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 5934 return true; 5935} 5936 5937void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5938 const APInt &Mask, 5939 APInt &KnownZero, 5940 APInt &KnownOne, 5941 const SelectionDAG &DAG, 5942 unsigned Depth) const { 5943 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5944 switch (Op.getOpcode()) { 5945 default: break; 5946 case ARMISD::CMOV: { 5947 // Bits are known zero/one if known on the LHS and RHS. 
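    // For instance, if both incoming values have their top 16 bits known to
    // be zero, then so does the CMOV result; the intersection below keeps
    // only the bits that are known on both sides.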
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne &= KnownOneRHS;
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  const Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
      if (Subtarget->isThumb())
        return std::make_pair(0U, ARM::tGPRRegisterClass);
      else
        return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, ARM::QPRRegisterClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

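// The register constraints above service GCC-style inline assembly such as
// the following (illustrative only; 'a', 'b', 'x', 'y' stand for arbitrary
// local variables):
//
//   int r;
//   asm("add %0, %1, %2" : "=l"(r) : "l"(a), "l"(b));      // low regs (Thumb)
//
//   float s;
//   asm("vadd.f32 %0, %1, %2" : "=w"(s) : "w"(x), "w"(y));  // VFP / NEON regs
//
// Here 'l' selects from the Thumb low registers (tGPR, r0-r7) when targeting
// Thumb and from GPR otherwise, while 'w' selects a floating-point register
// class sized to the operand type (SPR / DPR / QPR).
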
std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 0);
  case 'r':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0,  ARM::S1,  ARM::S2,  ARM::S3,
                                   ARM::S4,  ARM::S5,  ARM::S6,  ARM::S7,
                                   ARM::S8,  ARM::S9,  ARM::S10, ARM::S11,
                                   ARM::S12, ARM::S13, ARM::S14, ARM::S15,
                                   ARM::S16, ARM::S17, ARM::S18, ARM::S19,
                                   ARM::S20, ARM::S21, ARM::S22, ARM::S23,
                                   ARM::S24, ARM::S25, ARM::S26, ARM::S27,
                                   ARM::S28, ARM::S29, ARM::S30, ARM::S31, 0);
    if (VT.getSizeInBits() == 64)
      return make_vector<unsigned>(ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
                                   ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
                                   ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
                                   ARM::D12, ARM::D13, ARM::D14, ARM::D15, 0);
    if (VT.getSizeInBits() == 128)
      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
    break;
  }

  return std::vector<unsigned>();
}

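// The immediate constraints handled below ('I' through 'O') mirror GCC's ARM
// machine constraints. For example (illustrative only), with the 'I'
// constraint an expression such as
//
//   asm("add %0, %1, %2" : "=r"(r) : "r"(a), "I"(0xff0000));
//
// is accepted in ARM mode because 0xff0000 is a valid data-processing
// immediate (an 8-bit value rotated right by an even amount), whereas a
// constant like 0x12345 is rejected and must be materialized in a register.
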
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (Constraint) {
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

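// getVFPf32Imm / getVFPf64Imm map a floating-point constant onto the 8-bit
// VFPv3 immediate encoding abcdefgh, whose value is
//   (-1)^a * (16 + UInt(e:f:g:h))/16 * 2^(UInt(NOT(b):c:d) - 3)
// and return -1 if the constant is not representable.  For example, 1.0f is
// 0x3f800000: sign 0, unbiased exponent 0 and a zero 4-bit fraction, which
// encodes as abcdefgh = 0b01110000 = 0x70.
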
int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;
  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return false;
  }
  return true;
}

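// For example, isBitFieldInvertedMask(0xffff00ff) returns true: the set bits
// hug both ends of the word and the clear bits form the single contiguous
// field 8..15, so the complement (0x0000ff00) is exactly the field a BFC/BFI
// instruction would clear or insert into.  A value such as 0x0ff000ff is
// rejected because its set bits are not confined to the two ends.
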
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
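
// Note on the memVT convention above: for a call like (schematic IR)
//   %v = call { <4 x i32>, <4 x i32>, <4 x i32> }
//            @llvm.arm.neon.vld3.v4i32(i8* %ptr, i32 1)
// the returned aggregate occupies 48 bytes, so NumElts is 6 and memVT becomes
// v6i64: a conservative byte-accurate footprint for the MachineMemOperand
// rather than the precise element type being loaded.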