ARMISelLowering.cpp revision fc5d305597ea6336d75bd7f3b741e8d57d6a5105
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that ARM uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "arm-isel" 16#include "ARM.h" 17#include "ARMAddressingModes.h" 18#include "ARMCallingConv.h" 19#include "ARMConstantPoolValue.h" 20#include "ARMISelLowering.h" 21#include "ARMMachineFunctionInfo.h" 22#include "ARMPerfectShuffle.h" 23#include "ARMRegisterInfo.h" 24#include "ARMSubtarget.h" 25#include "ARMTargetMachine.h" 26#include "ARMTargetObjectFile.h" 27#include "llvm/CallingConv.h" 28#include "llvm/Constants.h" 29#include "llvm/Function.h" 30#include "llvm/GlobalValue.h" 31#include "llvm/Instruction.h" 32#include "llvm/Instructions.h" 33#include "llvm/Intrinsics.h" 34#include "llvm/Type.h" 35#include "llvm/CodeGen/CallingConvLower.h" 36#include "llvm/CodeGen/IntrinsicLowering.h" 37#include "llvm/CodeGen/MachineBasicBlock.h" 38#include "llvm/CodeGen/MachineFrameInfo.h" 39#include "llvm/CodeGen/MachineFunction.h" 40#include "llvm/CodeGen/MachineInstrBuilder.h" 41#include "llvm/CodeGen/MachineRegisterInfo.h" 42#include "llvm/CodeGen/PseudoSourceValue.h" 43#include "llvm/CodeGen/SelectionDAG.h" 44#include "llvm/MC/MCSectionMachO.h" 45#include "llvm/Target/TargetOptions.h" 46#include "llvm/ADT/VectorExtras.h" 47#include "llvm/ADT/StringExtras.h" 48#include "llvm/ADT/Statistic.h" 49#include "llvm/Support/CommandLine.h" 50#include "llvm/Support/ErrorHandling.h" 51#include "llvm/Support/MathExtras.h" 52#include "llvm/Support/raw_ostream.h" 53#include <sstream> 54using namespace llvm; 55 56STATISTIC(NumTailCalls, "Number of tail calls"); 57STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt"); 58 59// This option should go away when tail calls fully work. 60static cl::opt<bool> 61EnableARMTailCalls("arm-tail-calls", cl::Hidden, 62 cl::desc("Generate tail calls (TEMPORARY OPTION)."), 63 cl::init(false)); 64 65cl::opt<bool> 66EnableARMLongCalls("arm-long-calls", cl::Hidden, 67 cl::desc("Generate calls via indirect call instructions"), 68 cl::init(false)); 69 70static cl::opt<bool> 71ARMInterworking("arm-interworking", cl::Hidden, 72 cl::desc("Enable / disable ARM interworking (for debugging only)"), 73 cl::init(true)); 74 75// The APCS parameter registers. 
76static const unsigned GPRArgRegs[] = { 77 ARM::R0, ARM::R1, ARM::R2, ARM::R3 78}; 79 80void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, 81 EVT PromotedBitwiseVT) { 82 if (VT != PromotedLdStVT) { 83 setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote); 84 AddPromotedToType (ISD::LOAD, VT.getSimpleVT(), 85 PromotedLdStVT.getSimpleVT()); 86 87 setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); 88 AddPromotedToType (ISD::STORE, VT.getSimpleVT(), 89 PromotedLdStVT.getSimpleVT()); 90 } 91 92 EVT ElemTy = VT.getVectorElementType(); 93 if (ElemTy != MVT::i64 && ElemTy != MVT::f64) 94 setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom); 95 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom); 96 if (ElemTy != MVT::i32) { 97 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand); 98 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand); 99 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand); 100 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand); 101 } 102 setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom); 103 setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom); 104 setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal); 105 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal); 106 setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand); 107 setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand); 108 if (VT.isInteger()) { 109 setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom); 110 setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom); 111 setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom); 112 setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand); 113 setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand); 114 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 115 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 116 setTruncStoreAction(VT.getSimpleVT(), 117 (MVT::SimpleValueType)InnerVT, Expand); 118 } 119 setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand); 120 121 // Promote all bit-wise operations. 122 if (VT.isInteger() && VT != PromotedBitwiseVT) { 123 setOperationAction(ISD::AND, VT.getSimpleVT(), Promote); 124 AddPromotedToType (ISD::AND, VT.getSimpleVT(), 125 PromotedBitwiseVT.getSimpleVT()); 126 setOperationAction(ISD::OR, VT.getSimpleVT(), Promote); 127 AddPromotedToType (ISD::OR, VT.getSimpleVT(), 128 PromotedBitwiseVT.getSimpleVT()); 129 setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote); 130 AddPromotedToType (ISD::XOR, VT.getSimpleVT(), 131 PromotedBitwiseVT.getSimpleVT()); 132 } 133 134 // Neon does not support vector divide/remainder operations. 
135 setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand); 136 setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand); 137 setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand); 138 setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand); 139 setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand); 140 setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand); 141} 142 143void ARMTargetLowering::addDRTypeForNEON(EVT VT) { 144 addRegisterClass(VT, ARM::DPRRegisterClass); 145 addTypeForNEON(VT, MVT::f64, MVT::v2i32); 146} 147 148void ARMTargetLowering::addQRTypeForNEON(EVT VT) { 149 addRegisterClass(VT, ARM::QPRRegisterClass); 150 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); 151} 152 153static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) { 154 if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin()) 155 return new TargetLoweringObjectFileMachO(); 156 157 return new ARMElfTargetObjectFile(); 158} 159 160ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) 161 : TargetLowering(TM, createTLOF(TM)) { 162 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 163 RegInfo = TM.getRegisterInfo(); 164 Itins = TM.getInstrItineraryData(); 165 166 if (Subtarget->isTargetDarwin()) { 167 // Uses VFP for Thumb libfuncs if available. 168 if (Subtarget->isThumb() && Subtarget->hasVFP2()) { 169 // Single-precision floating-point arithmetic. 170 setLibcallName(RTLIB::ADD_F32, "__addsf3vfp"); 171 setLibcallName(RTLIB::SUB_F32, "__subsf3vfp"); 172 setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp"); 173 setLibcallName(RTLIB::DIV_F32, "__divsf3vfp"); 174 175 // Double-precision floating-point arithmetic. 176 setLibcallName(RTLIB::ADD_F64, "__adddf3vfp"); 177 setLibcallName(RTLIB::SUB_F64, "__subdf3vfp"); 178 setLibcallName(RTLIB::MUL_F64, "__muldf3vfp"); 179 setLibcallName(RTLIB::DIV_F64, "__divdf3vfp"); 180 181 // Single-precision comparisons. 182 setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp"); 183 setLibcallName(RTLIB::UNE_F32, "__nesf2vfp"); 184 setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp"); 185 setLibcallName(RTLIB::OLE_F32, "__lesf2vfp"); 186 setLibcallName(RTLIB::OGE_F32, "__gesf2vfp"); 187 setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp"); 188 setLibcallName(RTLIB::UO_F32, "__unordsf2vfp"); 189 setLibcallName(RTLIB::O_F32, "__unordsf2vfp"); 190 191 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 192 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE); 193 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 194 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 195 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 196 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 197 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 198 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 199 200 // Double-precision comparisons. 201 setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp"); 202 setLibcallName(RTLIB::UNE_F64, "__nedf2vfp"); 203 setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp"); 204 setLibcallName(RTLIB::OLE_F64, "__ledf2vfp"); 205 setLibcallName(RTLIB::OGE_F64, "__gedf2vfp"); 206 setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp"); 207 setLibcallName(RTLIB::UO_F64, "__unorddf2vfp"); 208 setLibcallName(RTLIB::O_F64, "__unorddf2vfp"); 209 210 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 211 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE); 212 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 213 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 214 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 215 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 216 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 217 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 218 219 // Floating-point to integer conversions. 
220 // i64 conversions are done via library routines even when generating VFP 221 // instructions, so use the same ones. 222 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp"); 223 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp"); 224 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp"); 225 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp"); 226 227 // Conversions between floating types. 228 setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp"); 229 setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp"); 230 231 // Integer to floating-point conversions. 232 // i64 conversions are done via library routines even when generating VFP 233 // instructions, so use the same ones. 234 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 235 // e.g., __floatunsidf vs. __floatunssidfvfp. 236 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp"); 237 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp"); 238 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp"); 239 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp"); 240 } 241 } 242 243 // These libcalls are not available in 32-bit. 244 setLibcallName(RTLIB::SHL_I128, 0); 245 setLibcallName(RTLIB::SRL_I128, 0); 246 setLibcallName(RTLIB::SRA_I128, 0); 247 248 if (Subtarget->isAAPCS_ABI()) { 249 // Double-precision floating-point arithmetic helper functions 250 // RTABI chapter 4.1.2, Table 2 251 setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd"); 252 setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv"); 253 setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul"); 254 setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub"); 255 setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS); 256 setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS); 257 setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS); 258 setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS); 259 260 // Double-precision floating-point comparison helper functions 261 // RTABI chapter 4.1.2, Table 3 262 setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq"); 263 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 264 setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq"); 265 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ); 266 setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt"); 267 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 268 setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple"); 269 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 270 setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge"); 271 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 272 setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt"); 273 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 274 setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun"); 275 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 276 setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun"); 277 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 278 setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS); 279 setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS); 280 setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS); 281 setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS); 282 setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS); 283 setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS); 284 setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS); 285 setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS); 286 287 // Single-precision floating-point arithmetic helper functions 288 // RTABI chapter 4.1.2, Table 4 289 setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd"); 290 setLibcallName(RTLIB::DIV_F32, 
"__aeabi_fdiv"); 291 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 292 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 293 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 294 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 295 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 296 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 297 298 // Single-precision floating-point comparison helper functions 299 // RTABI chapter 4.1.2, Table 5 300 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 301 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 302 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 303 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 304 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 305 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 306 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 307 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 308 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 309 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 310 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 311 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 312 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 313 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 314 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 315 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 316 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 317 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 318 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 319 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 320 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 321 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 322 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 323 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 324 325 // Floating-point to integer conversions. 326 // RTABI chapter 4.1.2, Table 6 327 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 328 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 329 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 330 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 331 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 332 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 333 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 334 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 335 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 342 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 343 344 // Conversions between floating types. 345 // RTABI chapter 4.1.2, Table 7 346 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 347 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 348 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 349 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 350 351 // Integer to floating-point conversions. 
352 // RTABI chapter 4.1.2, Table 8 353 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d"); 354 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d"); 355 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d"); 356 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d"); 357 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f"); 358 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f"); 359 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f"); 360 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f"); 361 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 362 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 363 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 364 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 365 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 366 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 367 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 368 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 369 370 // Long long helper functions 371 // RTABI chapter 4.2, Table 9 372 setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul"); 373 setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod"); 374 setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod"); 375 setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl"); 376 setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr"); 377 setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr"); 378 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS); 379 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS); 380 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS); 381 setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS); 382 setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS); 383 setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS); 384 385 // Integer division functions 386 // RTABI chapter 4.3.1 387 setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv"); 388 setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv"); 389 setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv"); 390 setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv"); 391 setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv"); 392 setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv"); 393 setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS); 394 setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS); 395 setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS); 396 setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS); 397 setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS); 398 setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS); 399 } 400 401 if (Subtarget->isThumb1Only()) 402 addRegisterClass(MVT::i32, ARM::tGPRRegisterClass); 403 else 404 addRegisterClass(MVT::i32, ARM::GPRRegisterClass); 405 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 406 addRegisterClass(MVT::f32, ARM::SPRRegisterClass); 407 if (!Subtarget->isFPOnlySP()) 408 addRegisterClass(MVT::f64, ARM::DPRRegisterClass); 409 410 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 411 } 412 413 if (Subtarget->hasNEON()) { 414 addDRTypeForNEON(MVT::v2f32); 415 addDRTypeForNEON(MVT::v8i8); 416 addDRTypeForNEON(MVT::v4i16); 417 addDRTypeForNEON(MVT::v2i32); 418 addDRTypeForNEON(MVT::v1i64); 419 420 addQRTypeForNEON(MVT::v4f32); 421 addQRTypeForNEON(MVT::v2f64); 422 addQRTypeForNEON(MVT::v16i8); 423 addQRTypeForNEON(MVT::v8i16); 424 addQRTypeForNEON(MVT::v4i32); 425 
addQRTypeForNEON(MVT::v2i64); 426 427 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 428 // neither Neon nor VFP support any arithmetic operations on it. 429 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 430 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 431 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 432 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 433 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 434 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 435 setOperationAction(ISD::VSETCC, MVT::v2f64, Expand); 436 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 437 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 438 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 439 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 440 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 441 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand); 442 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 443 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 444 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 445 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 446 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 447 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 448 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 449 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 450 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 451 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 452 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 453 454 setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand); 455 456 // Neon does not support some operations on v1i64 and v2i64 types. 457 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 458 // Custom handling for some quad-vector types to detect VMULL. 459 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 460 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 461 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 462 // Custom handling for some vector types to avoid expensive expansions 463 setOperationAction(ISD::SDIV, MVT::v4i16, Custom); 464 setOperationAction(ISD::SDIV, MVT::v8i8, Custom); 465 setOperationAction(ISD::UDIV, MVT::v4i16, Custom); 466 setOperationAction(ISD::UDIV, MVT::v8i8, Custom); 467 setOperationAction(ISD::VSETCC, MVT::v1i64, Expand); 468 setOperationAction(ISD::VSETCC, MVT::v2i64, Expand); 469 // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with 470 // a destination type that is wider than the source. 471 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); 472 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 473 474 setTargetDAGCombine(ISD::INTRINSIC_VOID); 475 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 476 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 477 setTargetDAGCombine(ISD::SHL); 478 setTargetDAGCombine(ISD::SRL); 479 setTargetDAGCombine(ISD::SRA); 480 setTargetDAGCombine(ISD::SIGN_EXTEND); 481 setTargetDAGCombine(ISD::ZERO_EXTEND); 482 setTargetDAGCombine(ISD::ANY_EXTEND); 483 setTargetDAGCombine(ISD::SELECT_CC); 484 setTargetDAGCombine(ISD::BUILD_VECTOR); 485 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 486 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 487 setTargetDAGCombine(ISD::STORE); 488 } 489 490 computeRegisterProperties(); 491 492 // ARM does not have f32 extending load. 493 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); 494 495 // ARM does not have i1 sign extending load. 496 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 497 498 // ARM supports all 4 flavors of integer indexed load / store. 
499 if (!Subtarget->isThumb1Only()) { 500 for (unsigned im = (unsigned)ISD::PRE_INC; 501 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 502 setIndexedLoadAction(im, MVT::i1, Legal); 503 setIndexedLoadAction(im, MVT::i8, Legal); 504 setIndexedLoadAction(im, MVT::i16, Legal); 505 setIndexedLoadAction(im, MVT::i32, Legal); 506 setIndexedStoreAction(im, MVT::i1, Legal); 507 setIndexedStoreAction(im, MVT::i8, Legal); 508 setIndexedStoreAction(im, MVT::i16, Legal); 509 setIndexedStoreAction(im, MVT::i32, Legal); 510 } 511 } 512 513 // i64 operation support. 514 setOperationAction(ISD::MUL, MVT::i64, Expand); 515 setOperationAction(ISD::MULHU, MVT::i32, Expand); 516 if (Subtarget->isThumb1Only()) { 517 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 518 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 519 } 520 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()) 521 setOperationAction(ISD::MULHS, MVT::i32, Expand); 522 523 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 524 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 525 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 526 setOperationAction(ISD::SRL, MVT::i64, Custom); 527 setOperationAction(ISD::SRA, MVT::i64, Custom); 528 529 // ARM does not have ROTL. 530 setOperationAction(ISD::ROTL, MVT::i32, Expand); 531 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 532 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 533 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) 534 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 535 536 // Only ARMv6 has BSWAP. 537 if (!Subtarget->hasV6Ops()) 538 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 539 540 // These are expanded into libcalls. 541 if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) { 542 // v7M has a hardware divider 543 setOperationAction(ISD::SDIV, MVT::i32, Expand); 544 setOperationAction(ISD::UDIV, MVT::i32, Expand); 545 } 546 setOperationAction(ISD::SREM, MVT::i32, Expand); 547 setOperationAction(ISD::UREM, MVT::i32, Expand); 548 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 549 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 550 551 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 552 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 553 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); 554 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 555 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 556 557 setOperationAction(ISD::TRAP, MVT::Other, Legal); 558 559 // Use the default implementation. 560 setOperationAction(ISD::VASTART, MVT::Other, Custom); 561 setOperationAction(ISD::VAARG, MVT::Other, Expand); 562 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 563 setOperationAction(ISD::VAEND, MVT::Other, Expand); 564 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 565 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 566 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 567 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 568 setExceptionPointerRegister(ARM::R0); 569 setExceptionSelectorRegister(ARM::R1); 570 571 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 572 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 573 // the default expansion. 574 if (Subtarget->hasDataBarrier() || 575 (Subtarget->hasV6Ops() && !Subtarget->isThumb())) { 576 // membarrier needs custom lowering; the rest are legal and handled 577 // normally. 
578 setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); 579 } else { 580 // Set them all for expansion, which will force libcalls. 581 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); 582 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand); 583 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand); 584 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 585 setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand); 586 setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand); 587 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 588 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand); 589 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand); 590 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 591 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand); 592 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand); 593 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 594 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand); 595 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand); 596 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 597 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand); 598 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand); 599 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 600 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand); 601 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand); 602 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 603 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand); 604 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand); 605 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 606 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i8, Expand); 607 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i16, Expand); 608 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 609 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i8, Expand); 610 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i16, Expand); 611 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 612 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i8, Expand); 613 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i16, Expand); 614 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 615 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i8, Expand); 616 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i16, Expand); 617 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 618 // Since the libcalls include locking, fold in the fences 619 setShouldFoldAtomicFences(true); 620 } 621 // 64-bit versions are always libcalls (for now) 622 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand); 623 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand); 624 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand); 625 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand); 626 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand); 627 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand); 628 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand); 629 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand); 630 631 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 632 633 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 
634 if (!Subtarget->hasV6Ops()) { 635 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 636 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 637 } 638 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 639 640 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 641 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 642 // iff target supports vfp2. 643 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 644 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 645 } 646 647 // We want to custom lower some of our intrinsics. 648 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 649 if (Subtarget->isTargetDarwin()) { 650 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 651 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 652 setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom); 653 } 654 655 setOperationAction(ISD::SETCC, MVT::i32, Expand); 656 setOperationAction(ISD::SETCC, MVT::f32, Expand); 657 setOperationAction(ISD::SETCC, MVT::f64, Expand); 658 setOperationAction(ISD::SELECT, MVT::i32, Custom); 659 setOperationAction(ISD::SELECT, MVT::f32, Custom); 660 setOperationAction(ISD::SELECT, MVT::f64, Custom); 661 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 662 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 663 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 664 665 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 666 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 667 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 668 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 669 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 670 671 // We don't support sin/cos/fmod/copysign/pow 672 setOperationAction(ISD::FSIN, MVT::f64, Expand); 673 setOperationAction(ISD::FSIN, MVT::f32, Expand); 674 setOperationAction(ISD::FCOS, MVT::f32, Expand); 675 setOperationAction(ISD::FCOS, MVT::f64, Expand); 676 setOperationAction(ISD::FREM, MVT::f64, Expand); 677 setOperationAction(ISD::FREM, MVT::f32, Expand); 678 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 679 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 680 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 681 } 682 setOperationAction(ISD::FPOW, MVT::f64, Expand); 683 setOperationAction(ISD::FPOW, MVT::f32, Expand); 684 685 // Various VFP goodness 686 if (!UseSoftFloat && !Subtarget->isThumb1Only()) { 687 // int <-> fp are custom expanded into bit_convert + ARMISD ops. 688 if (Subtarget->hasVFP2()) { 689 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 690 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 691 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 692 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 693 } 694 // Special handling for half-precision FP. 
695 if (!Subtarget->hasFP16()) { 696 setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand); 697 setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand); 698 } 699 } 700 701 // We have target-specific dag combine patterns for the following nodes: 702 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 703 setTargetDAGCombine(ISD::ADD); 704 setTargetDAGCombine(ISD::SUB); 705 setTargetDAGCombine(ISD::MUL); 706 707 if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) 708 setTargetDAGCombine(ISD::OR); 709 if (Subtarget->hasNEON()) 710 setTargetDAGCombine(ISD::AND); 711 712 setStackPointerRegisterToSaveRestore(ARM::SP); 713 714 if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2()) 715 setSchedulingPreference(Sched::RegPressure); 716 else 717 setSchedulingPreference(Sched::Hybrid); 718 719 //// temporary - rewrite interface to use type 720 maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1; 721 722 // On ARM arguments smaller than 4 bytes are extended, so all arguments 723 // are at least 4 bytes aligned. 724 setMinStackArgumentAlignment(4); 725 726 benefitFromCodePlacementOpt = true; 727 728 setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); 729} 730 731// FIXME: It might make sense to define the representative register class as the 732// nearest super-register that has a non-null superset. For example, DPR_VFP2 is 733// a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently, 734// SPR's representative would be DPR_VFP2. This should work well if register 735// pressure tracking were modified such that a register use would increment the 736// pressure of the register class's representative and all of it's super 737// classes' representatives transitively. We have not implemented this because 738// of the difficulty prior to coalescing of modeling operand register classes 739// due to the common occurrence of cross class copies and subregister insertions 740// and extractions. 741std::pair<const TargetRegisterClass*, uint8_t> 742ARMTargetLowering::findRepresentativeClass(EVT VT) const{ 743 const TargetRegisterClass *RRC = 0; 744 uint8_t Cost = 1; 745 switch (VT.getSimpleVT().SimpleTy) { 746 default: 747 return TargetLowering::findRepresentativeClass(VT); 748 // Use DPR as representative register class for all floating point 749 // and vector types. Since there are 32 SPR registers and 32 DPR registers so 750 // the cost is 1 for both f32 and f64. 751 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: 752 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: 753 RRC = ARM::DPRRegisterClass; 754 // When NEON is used for SP, only half of the register file is available 755 // because operations that define both SP and DP results will be constrained 756 // to the VFP2 class (D0-D15). We currently model this constraint prior to 757 // coalescing by double-counting the SP regs. See the FIXME above. 
758 if (Subtarget->useNEONForSinglePrecisionFP()) 759 Cost = 2; 760 break; 761 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 762 case MVT::v4f32: case MVT::v2f64: 763 RRC = ARM::DPRRegisterClass; 764 Cost = 2; 765 break; 766 case MVT::v4i64: 767 RRC = ARM::DPRRegisterClass; 768 Cost = 4; 769 break; 770 case MVT::v8i64: 771 RRC = ARM::DPRRegisterClass; 772 Cost = 8; 773 break; 774 } 775 return std::make_pair(RRC, Cost); 776} 777 778const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 779 switch (Opcode) { 780 default: return 0; 781 case ARMISD::Wrapper: return "ARMISD::Wrapper"; 782 case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN"; 783 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC"; 784 case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; 785 case ARMISD::CALL: return "ARMISD::CALL"; 786 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED"; 787 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; 788 case ARMISD::tCALL: return "ARMISD::tCALL"; 789 case ARMISD::BRCOND: return "ARMISD::BRCOND"; 790 case ARMISD::BR_JT: return "ARMISD::BR_JT"; 791 case ARMISD::BR2_JT: return "ARMISD::BR2_JT"; 792 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; 793 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; 794 case ARMISD::CMP: return "ARMISD::CMP"; 795 case ARMISD::CMPZ: return "ARMISD::CMPZ"; 796 case ARMISD::CMPFP: return "ARMISD::CMPFP"; 797 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; 798 case ARMISD::BCC_i64: return "ARMISD::BCC_i64"; 799 case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; 800 case ARMISD::CMOV: return "ARMISD::CMOV"; 801 802 case ARMISD::RBIT: return "ARMISD::RBIT"; 803 804 case ARMISD::FTOSI: return "ARMISD::FTOSI"; 805 case ARMISD::FTOUI: return "ARMISD::FTOUI"; 806 case ARMISD::SITOF: return "ARMISD::SITOF"; 807 case ARMISD::UITOF: return "ARMISD::UITOF"; 808 809 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; 810 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; 811 case ARMISD::RRX: return "ARMISD::RRX"; 812 813 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; 814 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; 815 816 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; 817 case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP"; 818 case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP"; 819 820 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; 821 822 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER"; 823 824 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; 825 826 case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER"; 827 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; 828 829 case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; 830 831 case ARMISD::VCEQ: return "ARMISD::VCEQ"; 832 case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; 833 case ARMISD::VCGE: return "ARMISD::VCGE"; 834 case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; 835 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 836 case ARMISD::VCGEU: return "ARMISD::VCGEU"; 837 case ARMISD::VCGT: return "ARMISD::VCGT"; 838 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 839 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 840 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 841 case ARMISD::VTST: return "ARMISD::VTST"; 842 843 case ARMISD::VSHL: return "ARMISD::VSHL"; 844 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 845 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 846 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 847 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 848 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 849 case ARMISD::VSHRN: return 
"ARMISD::VSHRN"; 850 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 851 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 852 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 853 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 854 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 855 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 856 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 857 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 858 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 859 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 860 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 861 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 862 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 863 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 864 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 865 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 866 case ARMISD::VDUP: return "ARMISD::VDUP"; 867 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 868 case ARMISD::VEXT: return "ARMISD::VEXT"; 869 case ARMISD::VREV64: return "ARMISD::VREV64"; 870 case ARMISD::VREV32: return "ARMISD::VREV32"; 871 case ARMISD::VREV16: return "ARMISD::VREV16"; 872 case ARMISD::VZIP: return "ARMISD::VZIP"; 873 case ARMISD::VUZP: return "ARMISD::VUZP"; 874 case ARMISD::VTRN: return "ARMISD::VTRN"; 875 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 876 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 877 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 878 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 879 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 880 case ARMISD::FMAX: return "ARMISD::FMAX"; 881 case ARMISD::FMIN: return "ARMISD::FMIN"; 882 case ARMISD::BFI: return "ARMISD::BFI"; 883 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 884 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 885 case ARMISD::VBSL: return "ARMISD::VBSL"; 886 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 887 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 888 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 889 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 890 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 891 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 892 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 893 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 894 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 895 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 896 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 897 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 898 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 899 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 900 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 901 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 902 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 903 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 904 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 905 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 906 } 907} 908 909/// getRegClassFor - Return the register class that should be used for the 910/// specified value type. 911TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 912 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 913 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 914 // load / store 4 to 8 consecutive D registers. 
915 if (Subtarget->hasNEON()) { 916 if (VT == MVT::v4i64) 917 return ARM::QQPRRegisterClass; 918 else if (VT == MVT::v8i64) 919 return ARM::QQQQPRRegisterClass; 920 } 921 return TargetLowering::getRegClassFor(VT); 922} 923 924// Create a fast isel object. 925FastISel * 926ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 927 return ARM::createFastISel(funcInfo); 928} 929 930/// getMaximalGlobalOffset - Returns the maximal possible offset which can 931/// be used for loads / stores from the global. 932unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 933 return (Subtarget->isThumb1Only() ? 127 : 4095); 934} 935 936Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { 937 unsigned NumVals = N->getNumValues(); 938 if (!NumVals) 939 return Sched::RegPressure; 940 941 for (unsigned i = 0; i != NumVals; ++i) { 942 EVT VT = N->getValueType(i); 943 if (VT == MVT::Glue || VT == MVT::Other) 944 continue; 945 if (VT.isFloatingPoint() || VT.isVector()) 946 return Sched::Latency; 947 } 948 949 if (!N->isMachineOpcode()) 950 return Sched::RegPressure; 951 952 // Load are scheduled for latency even if there instruction itinerary 953 // is not available. 954 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 955 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode()); 956 957 if (TID.getNumDefs() == 0) 958 return Sched::RegPressure; 959 if (!Itins->isEmpty() && 960 Itins->getOperandCycle(TID.getSchedClass(), 0) > 2) 961 return Sched::Latency; 962 963 return Sched::RegPressure; 964} 965 966//===----------------------------------------------------------------------===// 967// Lowering Code 968//===----------------------------------------------------------------------===// 969 970/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 971static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 972 switch (CC) { 973 default: llvm_unreachable("Unknown condition code!"); 974 case ISD::SETNE: return ARMCC::NE; 975 case ISD::SETEQ: return ARMCC::EQ; 976 case ISD::SETGT: return ARMCC::GT; 977 case ISD::SETGE: return ARMCC::GE; 978 case ISD::SETLT: return ARMCC::LT; 979 case ISD::SETLE: return ARMCC::LE; 980 case ISD::SETUGT: return ARMCC::HI; 981 case ISD::SETUGE: return ARMCC::HS; 982 case ISD::SETULT: return ARMCC::LO; 983 case ISD::SETULE: return ARMCC::LS; 984 } 985} 986 987/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
988static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 989 ARMCC::CondCodes &CondCode2) { 990 CondCode2 = ARMCC::AL; 991 switch (CC) { 992 default: llvm_unreachable("Unknown FP condition!"); 993 case ISD::SETEQ: 994 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 995 case ISD::SETGT: 996 case ISD::SETOGT: CondCode = ARMCC::GT; break; 997 case ISD::SETGE: 998 case ISD::SETOGE: CondCode = ARMCC::GE; break; 999 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1000 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1001 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1002 case ISD::SETO: CondCode = ARMCC::VC; break; 1003 case ISD::SETUO: CondCode = ARMCC::VS; break; 1004 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1005 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1006 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1007 case ISD::SETLT: 1008 case ISD::SETULT: CondCode = ARMCC::LT; break; 1009 case ISD::SETLE: 1010 case ISD::SETULE: CondCode = ARMCC::LE; break; 1011 case ISD::SETNE: 1012 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1013 } 1014} 1015 1016//===----------------------------------------------------------------------===// 1017// Calling Convention Implementation 1018//===----------------------------------------------------------------------===// 1019 1020#include "ARMGenCallingConv.inc" 1021 1022/// CCAssignFnForNode - Selects the correct CCAssignFn for a the 1023/// given CallingConvention value. 1024CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 1025 bool Return, 1026 bool isVarArg) const { 1027 switch (CC) { 1028 default: 1029 llvm_unreachable("Unsupported calling convention"); 1030 case CallingConv::Fast: 1031 if (Subtarget->hasVFP2() && !isVarArg) { 1032 if (!Subtarget->isAAPCS_ABI()) 1033 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1034 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1035 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1036 } 1037 // Fallthrough 1038 case CallingConv::C: { 1039 // Use target triple & subtarget features to do actual dispatch. 1040 if (!Subtarget->isAAPCS_ABI()) 1041 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1042 else if (Subtarget->hasVFP2() && 1043 FloatABIType == FloatABI::Hard && !isVarArg) 1044 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1045 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1046 } 1047 case CallingConv::ARM_AAPCS_VFP: 1048 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1049 case CallingConv::ARM_AAPCS: 1050 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1051 case CallingConv::ARM_APCS: 1052 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1053 } 1054} 1055 1056/// LowerCallResult - Lower the result values of a call into the 1057/// appropriate copies out of appropriate physical registers. 1058SDValue 1059ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1060 CallingConv::ID CallConv, bool isVarArg, 1061 const SmallVectorImpl<ISD::InputArg> &Ins, 1062 DebugLoc dl, SelectionDAG &DAG, 1063 SmallVectorImpl<SDValue> &InVals) const { 1064 1065 // Assign locations to each value returned by this call. 1066 SmallVector<CCValAssign, 16> RVLocs; 1067 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 1068 RVLocs, *DAG.getContext()); 1069 CCInfo.AnalyzeCallResult(Ins, 1070 CCAssignFnForNode(CallConv, /* Return*/ true, 1071 isVarArg)); 1072 1073 // Copy all of the result registers out of their specified physreg. 
1074 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1075 CCValAssign VA = RVLocs[i]; 1076 1077 SDValue Val; 1078 if (VA.needsCustom()) { 1079 // Handle f64 or half of a v2f64. 1080 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1081 InFlag); 1082 Chain = Lo.getValue(1); 1083 InFlag = Lo.getValue(2); 1084 VA = RVLocs[++i]; // skip ahead to next loc 1085 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1086 InFlag); 1087 Chain = Hi.getValue(1); 1088 InFlag = Hi.getValue(2); 1089 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1090 1091 if (VA.getLocVT() == MVT::v2f64) { 1092 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 1093 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1094 DAG.getConstant(0, MVT::i32)); 1095 1096 VA = RVLocs[++i]; // skip ahead to next loc 1097 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1098 Chain = Lo.getValue(1); 1099 InFlag = Lo.getValue(2); 1100 VA = RVLocs[++i]; // skip ahead to next loc 1101 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1102 Chain = Hi.getValue(1); 1103 InFlag = Hi.getValue(2); 1104 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1105 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1106 DAG.getConstant(1, MVT::i32)); 1107 } 1108 } else { 1109 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1110 InFlag); 1111 Chain = Val.getValue(1); 1112 InFlag = Val.getValue(2); 1113 } 1114 1115 switch (VA.getLocInfo()) { 1116 default: llvm_unreachable("Unknown loc info!"); 1117 case CCValAssign::Full: break; 1118 case CCValAssign::BCvt: 1119 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1120 break; 1121 } 1122 1123 InVals.push_back(Val); 1124 } 1125 1126 return Chain; 1127} 1128 1129/// LowerMemOpCallTo - Store the argument to the stack. 1130SDValue 1131ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, 1132 SDValue StackPtr, SDValue Arg, 1133 DebugLoc dl, SelectionDAG &DAG, 1134 const CCValAssign &VA, 1135 ISD::ArgFlagsTy Flags) const { 1136 unsigned LocMemOffset = VA.getLocMemOffset(); 1137 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1138 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1139 return DAG.getStore(Chain, dl, Arg, PtrOff, 1140 MachinePointerInfo::getStack(LocMemOffset), 1141 false, false, 0); 1142} 1143 1144void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, 1145 SDValue Chain, SDValue &Arg, 1146 RegsToPassVector &RegsToPass, 1147 CCValAssign &VA, CCValAssign &NextVA, 1148 SDValue &StackPtr, 1149 SmallVector<SDValue, 8> &MemOpChains, 1150 ISD::ArgFlagsTy Flags) const { 1151 1152 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1153 DAG.getVTList(MVT::i32, MVT::i32), Arg); 1154 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd)); 1155 1156 if (NextVA.isRegLoc()) 1157 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1))); 1158 else { 1159 assert(NextVA.isMemLoc()); 1160 if (StackPtr.getNode() == 0) 1161 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1162 1163 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1), 1164 dl, DAG, NextVA, 1165 Flags)); 1166 } 1167} 1168 1169/// LowerCall - Lowering a call into a callseq_start <- 1170/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter 1171/// nodes. 
1172SDValue 1173ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1174 CallingConv::ID CallConv, bool isVarArg, 1175 bool &isTailCall, 1176 const SmallVectorImpl<ISD::OutputArg> &Outs, 1177 const SmallVectorImpl<SDValue> &OutVals, 1178 const SmallVectorImpl<ISD::InputArg> &Ins, 1179 DebugLoc dl, SelectionDAG &DAG, 1180 SmallVectorImpl<SDValue> &InVals) const { 1181 MachineFunction &MF = DAG.getMachineFunction(); 1182 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); 1183 bool IsSibCall = false; 1184 // Temporarily disable tail calls so things don't break. 1185 if (!EnableARMTailCalls) 1186 isTailCall = false; 1187 if (isTailCall) { 1188 // Check if it's really possible to do a tail call. 1189 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1190 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1191 Outs, OutVals, Ins, DAG); 1192 // We don't support GuaranteedTailCallOpt for ARM, only automatically 1193 // detected sibcalls. 1194 if (isTailCall) { 1195 ++NumTailCalls; 1196 IsSibCall = true; 1197 } 1198 } 1199 1200 // Analyze operands of the call, assigning locations to each operand. 1201 SmallVector<CCValAssign, 16> ArgLocs; 1202 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 1203 *DAG.getContext()); 1204 CCInfo.setCallOrPrologue(Call); 1205 CCInfo.AnalyzeCallOperands(Outs, 1206 CCAssignFnForNode(CallConv, /* Return*/ false, 1207 isVarArg)); 1208 1209 // Get a count of how many bytes are to be pushed on the stack. 1210 unsigned NumBytes = CCInfo.getNextStackOffset(); 1211 1212 // For tail calls, memory operands are available in our caller's stack. 1213 if (IsSibCall) 1214 NumBytes = 0; 1215 1216 // Adjust the stack pointer for the new arguments... 1217 // These operations are automatically eliminated by the prolog/epilog pass 1218 if (!IsSibCall) 1219 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1220 1221 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1222 1223 RegsToPassVector RegsToPass; 1224 SmallVector<SDValue, 8> MemOpChains; 1225 1226 // Walk the register/memloc assignments, inserting copies/loads. In the case 1227 // of tail call optimization, arguments are handled later. 1228 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1229 i != e; 1230 ++i, ++realArgIdx) { 1231 CCValAssign &VA = ArgLocs[i]; 1232 SDValue Arg = OutVals[realArgIdx]; 1233 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1234 bool isByVal = Flags.isByVal(); 1235 1236 // Promote the value if needed. 
1237 switch (VA.getLocInfo()) { 1238 default: llvm_unreachable("Unknown loc info!"); 1239 case CCValAssign::Full: break; 1240 case CCValAssign::SExt: 1241 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1242 break; 1243 case CCValAssign::ZExt: 1244 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1245 break; 1246 case CCValAssign::AExt: 1247 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1248 break; 1249 case CCValAssign::BCvt: 1250 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1251 break; 1252 } 1253 1254 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1255 if (VA.needsCustom()) { 1256 if (VA.getLocVT() == MVT::v2f64) { 1257 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1258 DAG.getConstant(0, MVT::i32)); 1259 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1260 DAG.getConstant(1, MVT::i32)); 1261 1262 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1263 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1264 1265 VA = ArgLocs[++i]; // skip ahead to next loc 1266 if (VA.isRegLoc()) { 1267 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1268 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1269 } else { 1270 assert(VA.isMemLoc()); 1271 1272 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1273 dl, DAG, VA, Flags)); 1274 } 1275 } else { 1276 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1277 StackPtr, MemOpChains, Flags); 1278 } 1279 } else if (VA.isRegLoc()) { 1280 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1281 } else if (isByVal) { 1282 assert(VA.isMemLoc()); 1283 unsigned offset = 0; 1284 1285 // True if this byval aggregate will be split between registers 1286 // and memory. 1287 if (CCInfo.isFirstByValRegValid()) { 1288 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1289 unsigned int i, j; 1290 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1291 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1292 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1293 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1294 MachinePointerInfo(), 1295 false, false, 0); 1296 MemOpChains.push_back(Load.getValue(1)); 1297 RegsToPass.push_back(std::make_pair(j, Load)); 1298 } 1299 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1300 CCInfo.clearFirstByValReg(); 1301 } 1302 1303 unsigned LocMemOffset = VA.getLocMemOffset(); 1304 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1305 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1306 StkPtrOff); 1307 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1308 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1309 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1310 MVT::i32); 1311 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1312 Flags.getByValAlign(), 1313 /*isVolatile=*/false, 1314 /*AlwaysInline=*/false, 1315 MachinePointerInfo(0), 1316 MachinePointerInfo(0))); 1317 1318 } else if (!IsSibCall) { 1319 assert(VA.isMemLoc()); 1320 1321 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1322 dl, DAG, VA, Flags)); 1323 } 1324 } 1325 1326 if (!MemOpChains.empty()) 1327 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1328 &MemOpChains[0], MemOpChains.size()); 1329 1330 // Build a sequence of copy-to-reg nodes chained together with token chain 1331 // and flag operands which copy the outgoing args into the appropriate regs. 
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
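    // Illustrative note (added annotation): under -arm-long-calls the callee's
    // address is materialized as
    //   Callee = load (ARMISD::Wrapper (TargetConstantPool <callee>))
    // and the call is then made through a register value rather than with a
    // pc-relative branch, so it is not limited by the direct-branch range of
    // bl (+/- 32MB in ARM state).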
1375 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1376 const GlobalValue *GV = G->getGlobal(); 1377 // Create a constant pool entry for the callee address 1378 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1379 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1380 ARMPCLabelIndex, 1381 ARMCP::CPValue, 0); 1382 // Get the address of the callee into a register 1383 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1384 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1385 Callee = DAG.getLoad(getPointerTy(), dl, 1386 DAG.getEntryNode(), CPAddr, 1387 MachinePointerInfo::getConstantPool(), 1388 false, false, 0); 1389 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1390 const char *Sym = S->getSymbol(); 1391 1392 // Create a constant pool entry for the callee address 1393 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1394 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1395 Sym, ARMPCLabelIndex, 0); 1396 // Get the address of the callee into a register 1397 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1398 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1399 Callee = DAG.getLoad(getPointerTy(), dl, 1400 DAG.getEntryNode(), CPAddr, 1401 MachinePointerInfo::getConstantPool(), 1402 false, false, 0); 1403 } 1404 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1405 const GlobalValue *GV = G->getGlobal(); 1406 isDirect = true; 1407 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1408 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1409 getTargetMachine().getRelocationModel() != Reloc::Static; 1410 isARMFunc = !Subtarget->isThumb() || isStub; 1411 // ARM call to a local ARM function is predicable. 1412 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1413 // tBX takes a register source operand. 1414 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1415 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1416 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1417 ARMPCLabelIndex, 1418 ARMCP::CPValue, 4); 1419 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1420 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1421 Callee = DAG.getLoad(getPointerTy(), dl, 1422 DAG.getEntryNode(), CPAddr, 1423 MachinePointerInfo::getConstantPool(), 1424 false, false, 0); 1425 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1426 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1427 getPointerTy(), Callee, PICLabel); 1428 } else { 1429 // On ELF targets for PIC code, direct calls should go through the PLT 1430 unsigned OpFlags = 0; 1431 if (Subtarget->isTargetELF() && 1432 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1433 OpFlags = ARMII::MO_PLT; 1434 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1435 } 1436 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1437 isDirect = true; 1438 bool isStub = Subtarget->isTargetDarwin() && 1439 getTargetMachine().getRelocationModel() != Reloc::Static; 1440 isARMFunc = !Subtarget->isThumb() || isStub; 1441 // tBX takes a register source operand. 
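    // Illustrative note (added annotation): Thumb1 without v5T has no blx, so
    // an interworking call to ARM code cannot be a direct branch-and-link;
    // the block below instead loads the callee's address from a constant-pool
    // entry, adds the pic label bias (ARMISD::PIC_ADD), and calls through
    // that register.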
1442 const char *Sym = S->getSymbol(); 1443 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1444 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1445 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1446 Sym, ARMPCLabelIndex, 4); 1447 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1448 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1449 Callee = DAG.getLoad(getPointerTy(), dl, 1450 DAG.getEntryNode(), CPAddr, 1451 MachinePointerInfo::getConstantPool(), 1452 false, false, 0); 1453 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1454 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1455 getPointerTy(), Callee, PICLabel); 1456 } else { 1457 unsigned OpFlags = 0; 1458 // On ELF targets for PIC code, direct calls should go through the PLT 1459 if (Subtarget->isTargetELF() && 1460 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1461 OpFlags = ARMII::MO_PLT; 1462 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1463 } 1464 } 1465 1466 // FIXME: handle tail calls differently. 1467 unsigned CallOpc; 1468 if (Subtarget->isThumb()) { 1469 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1470 CallOpc = ARMISD::CALL_NOLINK; 1471 else 1472 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1473 } else { 1474 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1475 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1476 : ARMISD::CALL_NOLINK; 1477 } 1478 1479 std::vector<SDValue> Ops; 1480 Ops.push_back(Chain); 1481 Ops.push_back(Callee); 1482 1483 // Add argument registers to the end of the list so that they are known live 1484 // into the call. 1485 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1486 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1487 RegsToPass[i].second.getValueType())); 1488 1489 if (InFlag.getNode()) 1490 Ops.push_back(InFlag); 1491 1492 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1493 if (isTailCall) 1494 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1495 1496 // Returns a chain and a flag for retval copy to use. 1497 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1498 InFlag = Chain.getValue(1); 1499 1500 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1501 DAG.getIntPtrConstant(0, true), InFlag); 1502 if (!Ins.empty()) 1503 InFlag = Chain.getValue(1); 1504 1505 // Handle result values, copying them out of physregs into vregs that we 1506 // return. 1507 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1508 dl, DAG, InVals); 1509} 1510 1511/// HandleByVal - Every parameter *after* a byval parameter is passed 1512/// on the stack. Remember the next parameter register to allocate, 1513/// and then confiscate the rest of the parameter registers to insure 1514/// this. 1515void 1516llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const { 1517 unsigned reg = State->AllocateReg(GPRArgRegs, 4); 1518 assert((State->getCallOrPrologue() == Prologue || 1519 State->getCallOrPrologue() == Call) && 1520 "unhandled ParmContext"); 1521 if ((!State->isFirstByValRegValid()) && 1522 (ARM::R0 <= reg) && (reg <= ARM::R3)) { 1523 State->setFirstByValReg(reg); 1524 // At a call site, a byval parameter that is split between 1525 // registers and memory needs its size truncated here. In a 1526 // function prologue, such byval parameters are reassembled in 1527 // memory, and are not truncated. 
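    // Worked example (added annotation, values are hypothetical): if the first
    // free register is r1, then r1-r3 cover 4 * (R4 - R1) = 12 bytes of the
    // byval object, so at a call site a 20-byte byval has its remaining
    // size reduced to 20 - 12 = 8 bytes of stack.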
1528 if (State->getCallOrPrologue() == Call) { 1529 unsigned excess = 4 * (ARM::R4 - reg); 1530 assert(size >= excess && "expected larger existing stack allocation"); 1531 size -= excess; 1532 } 1533 } 1534 // Confiscate any remaining parameter registers to preclude their 1535 // assignment to subsequent parameters. 1536 while (State->AllocateReg(GPRArgRegs, 4)) 1537 ; 1538} 1539 1540/// MatchingStackOffset - Return true if the given stack call argument is 1541/// already available in the same position (relatively) of the caller's 1542/// incoming argument stack. 1543static 1544bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1545 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1546 const ARMInstrInfo *TII) { 1547 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1548 int FI = INT_MAX; 1549 if (Arg.getOpcode() == ISD::CopyFromReg) { 1550 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1551 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1552 return false; 1553 MachineInstr *Def = MRI->getVRegDef(VR); 1554 if (!Def) 1555 return false; 1556 if (!Flags.isByVal()) { 1557 if (!TII->isLoadFromStackSlot(Def, FI)) 1558 return false; 1559 } else { 1560 return false; 1561 } 1562 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1563 if (Flags.isByVal()) 1564 // ByVal argument is passed in as a pointer but it's now being 1565 // dereferenced. e.g. 1566 // define @foo(%struct.X* %A) { 1567 // tail call @bar(%struct.X* byval %A) 1568 // } 1569 return false; 1570 SDValue Ptr = Ld->getBasePtr(); 1571 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1572 if (!FINode) 1573 return false; 1574 FI = FINode->getIndex(); 1575 } else 1576 return false; 1577 1578 assert(FI != INT_MAX); 1579 if (!MFI->isFixedObjectIndex(FI)) 1580 return false; 1581 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1582} 1583 1584/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1585/// for tail call optimization. Targets which want to do tail call 1586/// optimization should implement this function. 1587bool 1588ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1589 CallingConv::ID CalleeCC, 1590 bool isVarArg, 1591 bool isCalleeStructRet, 1592 bool isCallerStructRet, 1593 const SmallVectorImpl<ISD::OutputArg> &Outs, 1594 const SmallVectorImpl<SDValue> &OutVals, 1595 const SmallVectorImpl<ISD::InputArg> &Ins, 1596 SelectionDAG& DAG) const { 1597 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1598 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1599 bool CCMatch = CallerCC == CalleeCC; 1600 1601 // Look for obvious safe cases to perform tail call optimization that do not 1602 // require ABI changes. This is what gcc calls sibcall. 1603 1604 // Do not sibcall optimize vararg calls unless the call site is not passing 1605 // any arguments. 1606 if (isVarArg && !Outs.empty()) 1607 return false; 1608 1609 // Also avoid sibcall optimization if either caller or callee uses struct 1610 // return semantics. 1611 if (isCalleeStructRet || isCallerStructRet) 1612 return false; 1613 1614 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1615 // emitEpilogue is not ready for them. 1616 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1617 // LR. 
  // This means if we need to reload LR, it takes an extra instruction,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used. Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, getTargetMachine(),
                    RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, getTargetMachine(),
                    RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
                   ArgLocs, *DAG.getContext());
    CCInfo.AnalyzeCallOperands(Outs,
                               CCAssignFnForNode(CalleeCC, false, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();

      // Check if the arguments are already laid out in the same way as the
      // caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const ARMInstrInfo *TII =
        ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations. The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
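          // Illustrative note (added annotation): an f64 argument occupies two
          // consecutive i32 locations here and a v2f64 argument occupies four,
          // which is why one extra location is checked below for f64 and three
          // extra ones for v2f64; if any piece lands in memory the sibcall is
          // rejected.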
1693 if (!VA.isRegLoc()) 1694 return false; 1695 if (!ArgLocs[++i].isRegLoc()) 1696 return false; 1697 if (RegVT == MVT::v2f64) { 1698 if (!ArgLocs[++i].isRegLoc()) 1699 return false; 1700 if (!ArgLocs[++i].isRegLoc()) 1701 return false; 1702 } 1703 } else if (!VA.isRegLoc()) { 1704 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1705 MFI, MRI, TII)) 1706 return false; 1707 } 1708 } 1709 } 1710 } 1711 1712 return true; 1713} 1714 1715SDValue 1716ARMTargetLowering::LowerReturn(SDValue Chain, 1717 CallingConv::ID CallConv, bool isVarArg, 1718 const SmallVectorImpl<ISD::OutputArg> &Outs, 1719 const SmallVectorImpl<SDValue> &OutVals, 1720 DebugLoc dl, SelectionDAG &DAG) const { 1721 1722 // CCValAssign - represent the assignment of the return value to a location. 1723 SmallVector<CCValAssign, 16> RVLocs; 1724 1725 // CCState - Info about the registers and stack slots. 1726 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1727 *DAG.getContext()); 1728 1729 // Analyze outgoing return values. 1730 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1731 isVarArg)); 1732 1733 // If this is the first return lowered for this function, add 1734 // the regs to the liveout set for the function. 1735 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1736 for (unsigned i = 0; i != RVLocs.size(); ++i) 1737 if (RVLocs[i].isRegLoc()) 1738 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1739 } 1740 1741 SDValue Flag; 1742 1743 // Copy the result values into the output registers. 1744 for (unsigned i = 0, realRVLocIdx = 0; 1745 i != RVLocs.size(); 1746 ++i, ++realRVLocIdx) { 1747 CCValAssign &VA = RVLocs[i]; 1748 assert(VA.isRegLoc() && "Can only return in registers!"); 1749 1750 SDValue Arg = OutVals[realRVLocIdx]; 1751 1752 switch (VA.getLocInfo()) { 1753 default: llvm_unreachable("Unknown loc info!"); 1754 case CCValAssign::Full: break; 1755 case CCValAssign::BCvt: 1756 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1757 break; 1758 } 1759 1760 if (VA.needsCustom()) { 1761 if (VA.getLocVT() == MVT::v2f64) { 1762 // Extract the first half and return it in two registers. 1763 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1764 DAG.getConstant(0, MVT::i32)); 1765 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1766 DAG.getVTList(MVT::i32, MVT::i32), Half); 1767 1768 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1769 Flag = Chain.getValue(1); 1770 VA = RVLocs[++i]; // skip ahead to next loc 1771 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1772 HalfGPRs.getValue(1), Flag); 1773 Flag = Chain.getValue(1); 1774 VA = RVLocs[++i]; // skip ahead to next loc 1775 1776 // Extract the 2nd half and fall through to handle it as an f64 value. 1777 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1778 DAG.getConstant(1, MVT::i32)); 1779 } 1780 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1781 // available. 1782 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1783 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1784 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1785 Flag = Chain.getValue(1); 1786 VA = RVLocs[++i]; // skip ahead to next loc 1787 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1788 Flag); 1789 } else 1790 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1791 1792 // Guarantee that all emitted copies are 1793 // stuck together, avoiding something bad. 
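      // (Added annotation: "stuck together" means each copy consumes the glue
      // produced by the previous one, so e.g. the two i32 copies that return a
      // soft-float f64 in R0 and R1 stay adjacent to the final RET_FLAG node.)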
1794 Flag = Chain.getValue(1); 1795 } 1796 1797 SDValue result; 1798 if (Flag.getNode()) 1799 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1800 else // Return Void 1801 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1802 1803 return result; 1804} 1805 1806bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1807 if (N->getNumValues() != 1) 1808 return false; 1809 if (!N->hasNUsesOfValue(1, 0)) 1810 return false; 1811 1812 unsigned NumCopies = 0; 1813 SDNode* Copies[2]; 1814 SDNode *Use = *N->use_begin(); 1815 if (Use->getOpcode() == ISD::CopyToReg) { 1816 Copies[NumCopies++] = Use; 1817 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1818 // f64 returned in a pair of GPRs. 1819 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1820 UI != UE; ++UI) { 1821 if (UI->getOpcode() != ISD::CopyToReg) 1822 return false; 1823 Copies[UI.getUse().getResNo()] = *UI; 1824 ++NumCopies; 1825 } 1826 } else if (Use->getOpcode() == ISD::BITCAST) { 1827 // f32 returned in a single GPR. 1828 if (!Use->hasNUsesOfValue(1, 0)) 1829 return false; 1830 Use = *Use->use_begin(); 1831 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1832 return false; 1833 Copies[NumCopies++] = Use; 1834 } else { 1835 return false; 1836 } 1837 1838 if (NumCopies != 1 && NumCopies != 2) 1839 return false; 1840 1841 bool HasRet = false; 1842 for (unsigned i = 0; i < NumCopies; ++i) { 1843 SDNode *Copy = Copies[i]; 1844 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1845 UI != UE; ++UI) { 1846 if (UI->getOpcode() == ISD::CopyToReg) { 1847 SDNode *Use = *UI; 1848 if (Use == Copies[0] || Use == Copies[1]) 1849 continue; 1850 return false; 1851 } 1852 if (UI->getOpcode() != ARMISD::RET_FLAG) 1853 return false; 1854 HasRet = true; 1855 } 1856 } 1857 1858 return HasRet; 1859} 1860 1861bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1862 if (!EnableARMTailCalls) 1863 return false; 1864 1865 if (!CI->isTailCall()) 1866 return false; 1867 1868 return !Subtarget->isThumb1Only(); 1869} 1870 1871// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1872// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1873// one of the above mentioned nodes. It has to be wrapped because otherwise 1874// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1875// be used to form addressing mode. These wrapped nodes will be selected 1876// into MOVi. 
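// Illustrative note (added annotation): LowerConstantPool below turns, for
// example, a (ConstantPool ...) node into
//   (ARMISD::Wrapper (TargetConstantPool ...))
// and whatever load or address arithmetic used the original node now uses the
// wrapped target node instead.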
1877static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1878 EVT PtrVT = Op.getValueType(); 1879 // FIXME there is no actual debug info here 1880 DebugLoc dl = Op.getDebugLoc(); 1881 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1882 SDValue Res; 1883 if (CP->isMachineConstantPoolEntry()) 1884 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1885 CP->getAlignment()); 1886 else 1887 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1888 CP->getAlignment()); 1889 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1890} 1891 1892unsigned ARMTargetLowering::getJumpTableEncoding() const { 1893 return MachineJumpTableInfo::EK_Inline; 1894} 1895 1896SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1897 SelectionDAG &DAG) const { 1898 MachineFunction &MF = DAG.getMachineFunction(); 1899 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1900 unsigned ARMPCLabelIndex = 0; 1901 DebugLoc DL = Op.getDebugLoc(); 1902 EVT PtrVT = getPointerTy(); 1903 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1904 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1905 SDValue CPAddr; 1906 if (RelocM == Reloc::Static) { 1907 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1908 } else { 1909 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 1910 ARMPCLabelIndex = AFI->createPICLabelUId(); 1911 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1912 ARMCP::CPBlockAddress, 1913 PCAdj); 1914 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1915 } 1916 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1917 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1918 MachinePointerInfo::getConstantPool(), 1919 false, false, 0); 1920 if (RelocM == Reloc::Static) 1921 return Result; 1922 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1923 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1924} 1925 1926// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1927SDValue 1928ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1929 SelectionDAG &DAG) const { 1930 DebugLoc dl = GA->getDebugLoc(); 1931 EVT PtrVT = getPointerTy(); 1932 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1933 MachineFunction &MF = DAG.getMachineFunction(); 1934 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1935 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1936 ARMConstantPoolValue *CPV = 1937 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1938 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1939 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1940 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1941 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1942 MachinePointerInfo::getConstantPool(), 1943 false, false, 0); 1944 SDValue Chain = Argument.getValue(1); 1945 1946 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1947 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1948 1949 // call __tls_get_addr. 1950 ArgListTy Args; 1951 ArgListEntry Entry; 1952 Entry.Node = Argument; 1953 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1954 Args.push_back(Entry); 1955 // FIXME: is there useful debug info available here? 
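  // Illustrative note (added annotation): this follows the usual ARM/ELF
  // general-dynamic TLS scheme; the constant-pool entry created above holds
  // the (tlsgd)-relocated offset for the variable, it is loaded and biased by
  // the pic label via ARMISD::PIC_ADD, and the result is passed as the single
  // i32 argument to __tls_get_addr, whose return value is the variable's
  // address.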
1956 std::pair<SDValue, SDValue> CallResult = 1957 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1958 false, false, false, false, 1959 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1960 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1961 return CallResult.first; 1962} 1963 1964// Lower ISD::GlobalTLSAddress using the "initial exec" or 1965// "local exec" model. 1966SDValue 1967ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1968 SelectionDAG &DAG) const { 1969 const GlobalValue *GV = GA->getGlobal(); 1970 DebugLoc dl = GA->getDebugLoc(); 1971 SDValue Offset; 1972 SDValue Chain = DAG.getEntryNode(); 1973 EVT PtrVT = getPointerTy(); 1974 // Get the Thread Pointer 1975 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1976 1977 if (GV->isDeclaration()) { 1978 MachineFunction &MF = DAG.getMachineFunction(); 1979 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1980 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1981 // Initial exec model. 1982 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1983 ARMConstantPoolValue *CPV = 1984 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1985 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1986 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1987 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1988 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1989 MachinePointerInfo::getConstantPool(), 1990 false, false, 0); 1991 Chain = Offset.getValue(1); 1992 1993 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1994 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1995 1996 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1997 MachinePointerInfo::getConstantPool(), 1998 false, false, 0); 1999 } else { 2000 // local exec model 2001 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 2002 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2003 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2004 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2005 MachinePointerInfo::getConstantPool(), 2006 false, false, 0); 2007 } 2008 2009 // The address of the thread local variable is the add of the thread 2010 // pointer with the offset of the variable. 2011 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2012} 2013 2014SDValue 2015ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2016 // TODO: implement the "local dynamic" model 2017 assert(Subtarget->isTargetELF() && 2018 "TLS not implemented for non-ELF targets"); 2019 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2020 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2021 // otherwise use the "Local Exec" TLS Model 2022 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2023 return LowerToTLSGeneralDynamicModel(GA, DAG); 2024 else 2025 return LowerToTLSExecModels(GA, DAG); 2026} 2027 2028SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2029 SelectionDAG &DAG) const { 2030 EVT PtrVT = getPointerTy(); 2031 DebugLoc dl = Op.getDebugLoc(); 2032 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2033 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2034 if (RelocM == Reloc::PIC_) { 2035 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2036 ARMConstantPoolValue *CPV = 2037 new ARMConstantPoolValue(GV, UseGOTOFF ? 
ARMCP::GOTOFF : ARMCP::GOT); 2038 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2039 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2040 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2041 CPAddr, 2042 MachinePointerInfo::getConstantPool(), 2043 false, false, 0); 2044 SDValue Chain = Result.getValue(1); 2045 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2046 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2047 if (!UseGOTOFF) 2048 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2049 MachinePointerInfo::getGOT(), false, false, 0); 2050 return Result; 2051 } 2052 2053 // If we have T2 ops, we can materialize the address directly via movt/movw 2054 // pair. This is always cheaper. 2055 if (Subtarget->useMovt()) { 2056 ++NumMovwMovt; 2057 // FIXME: Once remat is capable of dealing with instructions with register 2058 // operands, expand this into two nodes. 2059 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2060 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2061 } else { 2062 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2063 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2064 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2065 MachinePointerInfo::getConstantPool(), 2066 false, false, 0); 2067 } 2068} 2069 2070SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2071 SelectionDAG &DAG) const { 2072 EVT PtrVT = getPointerTy(); 2073 DebugLoc dl = Op.getDebugLoc(); 2074 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2075 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2076 MachineFunction &MF = DAG.getMachineFunction(); 2077 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2078 2079 if (Subtarget->useMovt()) { 2080 ++NumMovwMovt; 2081 // FIXME: Once remat is capable of dealing with instructions with register 2082 // operands, expand this into two nodes. 2083 if (RelocM == Reloc::Static) 2084 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2085 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2086 2087 unsigned Wrapper = (RelocM == Reloc::PIC_) 2088 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2089 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2090 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2091 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2092 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2093 MachinePointerInfo::getGOT(), false, false, 0); 2094 return Result; 2095 } 2096 2097 unsigned ARMPCLabelIndex = 0; 2098 SDValue CPAddr; 2099 if (RelocM == Reloc::Static) { 2100 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2101 } else { 2102 ARMPCLabelIndex = AFI->createPICLabelUId(); 2103 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 
0 : (Subtarget->isThumb()?4:8); 2104 ARMConstantPoolValue *CPV = 2105 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 2106 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2107 } 2108 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2109 2110 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2111 MachinePointerInfo::getConstantPool(), 2112 false, false, 0); 2113 SDValue Chain = Result.getValue(1); 2114 2115 if (RelocM == Reloc::PIC_) { 2116 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2117 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2118 } 2119 2120 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2121 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2122 false, false, 0); 2123 2124 return Result; 2125} 2126 2127SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2128 SelectionDAG &DAG) const { 2129 assert(Subtarget->isTargetELF() && 2130 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2131 MachineFunction &MF = DAG.getMachineFunction(); 2132 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2133 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2134 EVT PtrVT = getPointerTy(); 2135 DebugLoc dl = Op.getDebugLoc(); 2136 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2137 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2138 "_GLOBAL_OFFSET_TABLE_", 2139 ARMPCLabelIndex, PCAdj); 2140 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2141 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2142 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2143 MachinePointerInfo::getConstantPool(), 2144 false, false, 0); 2145 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2146 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2147} 2148 2149SDValue 2150ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2151 const { 2152 DebugLoc dl = Op.getDebugLoc(); 2153 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2154 Op.getOperand(0)); 2155} 2156 2157SDValue 2158ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2159 DebugLoc dl = Op.getDebugLoc(); 2160 SDValue Val = DAG.getConstant(0, MVT::i32); 2161 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2162 Op.getOperand(1), Val); 2163} 2164 2165SDValue 2166ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2167 DebugLoc dl = Op.getDebugLoc(); 2168 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2169 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2170} 2171 2172SDValue 2173ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2174 const ARMSubtarget *Subtarget) const { 2175 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2176 DebugLoc dl = Op.getDebugLoc(); 2177 switch (IntNo) { 2178 default: return SDValue(); // Don't custom lower most intrinsics. 
2179 case Intrinsic::arm_thread_pointer: { 2180 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2181 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2182 } 2183 case Intrinsic::eh_sjlj_lsda: { 2184 MachineFunction &MF = DAG.getMachineFunction(); 2185 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2186 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2187 EVT PtrVT = getPointerTy(); 2188 DebugLoc dl = Op.getDebugLoc(); 2189 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2190 SDValue CPAddr; 2191 unsigned PCAdj = (RelocM != Reloc::PIC_) 2192 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2193 ARMConstantPoolValue *CPV = 2194 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2195 ARMCP::CPLSDA, PCAdj); 2196 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2197 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2198 SDValue Result = 2199 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2200 MachinePointerInfo::getConstantPool(), 2201 false, false, 0); 2202 2203 if (RelocM == Reloc::PIC_) { 2204 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2205 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2206 } 2207 return Result; 2208 } 2209 case Intrinsic::arm_neon_vmulls: 2210 case Intrinsic::arm_neon_vmullu: { 2211 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2212 ? ARMISD::VMULLs : ARMISD::VMULLu; 2213 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2214 Op.getOperand(1), Op.getOperand(2)); 2215 } 2216 } 2217} 2218 2219static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2220 const ARMSubtarget *Subtarget) { 2221 DebugLoc dl = Op.getDebugLoc(); 2222 if (!Subtarget->hasDataBarrier()) { 2223 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2224 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2225 // here. 2226 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2227 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2228 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2229 DAG.getConstant(0, MVT::i32)); 2230 } 2231 2232 SDValue Op5 = Op.getOperand(5); 2233 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2234 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2235 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2236 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2237 2238 ARM_MB::MemBOpt DMBOpt; 2239 if (isDeviceBarrier) 2240 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2241 else 2242 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2243 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2244 DAG.getConstant(DMBOpt, MVT::i32)); 2245} 2246 2247static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2248 const ARMSubtarget *Subtarget) { 2249 // ARM pre v5TE and Thumb1 does not have preload instructions. 2250 if (!(Subtarget->isThumb2() || 2251 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2252 // Just preserve the chain. 2253 return Op.getOperand(0); 2254 2255 DebugLoc dl = Op.getDebugLoc(); 2256 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2257 if (!isRead && 2258 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2259 // ARMv7 with MP extension has PLDW. 2260 return Op.getOperand(0); 2261 2262 if (Subtarget->isThumb()) 2263 // Invert the bits. 2264 isRead = ~isRead & 1; 2265 unsigned isData = Subtarget->isThumb() ? 
0 : 1; 2266 2267 // Currently there is no intrinsic that matches pli. 2268 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2269 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2270 DAG.getConstant(isData, MVT::i32)); 2271} 2272 2273static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2274 MachineFunction &MF = DAG.getMachineFunction(); 2275 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2276 2277 // vastart just stores the address of the VarArgsFrameIndex slot into the 2278 // memory location argument. 2279 DebugLoc dl = Op.getDebugLoc(); 2280 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2281 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2282 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2283 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2284 MachinePointerInfo(SV), false, false, 0); 2285} 2286 2287SDValue 2288ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2289 SDValue &Root, SelectionDAG &DAG, 2290 DebugLoc dl) const { 2291 MachineFunction &MF = DAG.getMachineFunction(); 2292 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2293 2294 TargetRegisterClass *RC; 2295 if (AFI->isThumb1OnlyFunction()) 2296 RC = ARM::tGPRRegisterClass; 2297 else 2298 RC = ARM::GPRRegisterClass; 2299 2300 // Transform the arguments stored in physical registers into virtual ones. 2301 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2302 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2303 2304 SDValue ArgValue2; 2305 if (NextVA.isMemLoc()) { 2306 MachineFrameInfo *MFI = MF.getFrameInfo(); 2307 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2308 2309 // Create load node to retrieve arguments from the stack. 2310 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2311 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2312 MachinePointerInfo::getFixedStack(FI), 2313 false, false, 0); 2314 } else { 2315 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2316 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2317 } 2318 2319 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2320} 2321 2322void 2323ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, 2324 unsigned &VARegSize, unsigned &VARegSaveSize) 2325 const { 2326 unsigned NumGPRs; 2327 if (CCInfo.isFirstByValRegValid()) 2328 NumGPRs = ARM::R4 - CCInfo.getFirstByValReg(); 2329 else { 2330 unsigned int firstUnalloced; 2331 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs, 2332 sizeof(GPRArgRegs) / 2333 sizeof(GPRArgRegs[0])); 2334 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0; 2335 } 2336 2337 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2338 VARegSize = NumGPRs * 4; 2339 VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2340} 2341 2342// The remaining GPRs hold either the beginning of variable-argument 2343// data, or the beginning of an aggregate passed by value (usuall 2344// byval). Either way, we allocate stack slots adjacent to the data 2345// provided by our caller, and store the unallocated registers there. 2346// If this is a variadic function, the va_list pointer will begin with 2347// these values; otherwise, this reassembles a (byval) structure that 2348// was split between registers and memory. 
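// Worked example (added annotation, values are hypothetical): in a variadic
// function such as f(i32 %a, ...) the fixed argument consumes r0, so
// computeRegArea above reports NumGPRs = 3 and VARegSize = 12;
// VarArgStyleRegisters below then creates a fixed stack object of
// VARegSaveSize bytes (VARegSize rounded up to the stack alignment) adjacent
// to the caller-provided arguments and stores r1-r3 into it, so va_arg can
// walk every argument through one contiguous area.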
2349void 2350ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2351 DebugLoc dl, SDValue &Chain, 2352 unsigned ArgOffset) const { 2353 MachineFunction &MF = DAG.getMachineFunction(); 2354 MachineFrameInfo *MFI = MF.getFrameInfo(); 2355 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2356 unsigned firstRegToSaveIndex; 2357 if (CCInfo.isFirstByValRegValid()) 2358 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2359 else { 2360 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2361 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2362 } 2363 2364 unsigned VARegSize, VARegSaveSize; 2365 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2366 if (VARegSaveSize) { 2367 // If this function is vararg, store any remaining integer argument regs 2368 // to their spots on the stack so that they may be loaded by deferencing 2369 // the result of va_next. 2370 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2371 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2372 ArgOffset + VARegSaveSize 2373 - VARegSize, 2374 false)); 2375 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2376 getPointerTy()); 2377 2378 SmallVector<SDValue, 4> MemOps; 2379 for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { 2380 TargetRegisterClass *RC; 2381 if (AFI->isThumb1OnlyFunction()) 2382 RC = ARM::tGPRRegisterClass; 2383 else 2384 RC = ARM::GPRRegisterClass; 2385 2386 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2387 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2388 SDValue Store = 2389 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2390 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2391 false, false, 0); 2392 MemOps.push_back(Store); 2393 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2394 DAG.getConstant(4, getPointerTy())); 2395 } 2396 if (!MemOps.empty()) 2397 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2398 &MemOps[0], MemOps.size()); 2399 } else 2400 // This will point to the next argument passed via stack. 2401 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2402} 2403 2404SDValue 2405ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2406 CallingConv::ID CallConv, bool isVarArg, 2407 const SmallVectorImpl<ISD::InputArg> 2408 &Ins, 2409 DebugLoc dl, SelectionDAG &DAG, 2410 SmallVectorImpl<SDValue> &InVals) 2411 const { 2412 MachineFunction &MF = DAG.getMachineFunction(); 2413 MachineFrameInfo *MFI = MF.getFrameInfo(); 2414 2415 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2416 2417 // Assign locations to all of the incoming arguments. 2418 SmallVector<CCValAssign, 16> ArgLocs; 2419 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2420 *DAG.getContext()); 2421 CCInfo.setCallOrPrologue(Prologue); 2422 CCInfo.AnalyzeFormalArguments(Ins, 2423 CCAssignFnForNode(CallConv, /* Return*/ false, 2424 isVarArg)); 2425 2426 SmallVector<SDValue, 16> ArgValues; 2427 int lastInsIndex = -1; 2428 2429 SDValue ArgValue; 2430 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2431 CCValAssign &VA = ArgLocs[i]; 2432 2433 // Arguments stored in registers. 2434 if (VA.isRegLoc()) { 2435 EVT RegVT = VA.getLocVT(); 2436 2437 if (VA.needsCustom()) { 2438 // f64 and vector types are split up into multiple registers or 2439 // combinations of registers and stack slots. 
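        // Illustrative note (added annotation): an incoming f64 whose halves
        // arrived in, say, r2 and r3 (or r3 plus a stack slot) is reassembled
        // below by GetF64FormalArgument with ARMISD::VMOVDRR; a v2f64 argument
        // does this twice and inserts both f64 values into a v2f64 vector.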
2440 if (VA.getLocVT() == MVT::v2f64) { 2441 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2442 Chain, DAG, dl); 2443 VA = ArgLocs[++i]; // skip ahead to next loc 2444 SDValue ArgValue2; 2445 if (VA.isMemLoc()) { 2446 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2447 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2448 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2449 MachinePointerInfo::getFixedStack(FI), 2450 false, false, 0); 2451 } else { 2452 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2453 Chain, DAG, dl); 2454 } 2455 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2456 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2457 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2458 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2459 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2460 } else 2461 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2462 2463 } else { 2464 TargetRegisterClass *RC; 2465 2466 if (RegVT == MVT::f32) 2467 RC = ARM::SPRRegisterClass; 2468 else if (RegVT == MVT::f64) 2469 RC = ARM::DPRRegisterClass; 2470 else if (RegVT == MVT::v2f64) 2471 RC = ARM::QPRRegisterClass; 2472 else if (RegVT == MVT::i32) 2473 RC = (AFI->isThumb1OnlyFunction() ? 2474 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2475 else 2476 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2477 2478 // Transform the arguments in physical registers into virtual ones. 2479 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2480 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2481 } 2482 2483 // If this is an 8 or 16-bit value, it is really passed promoted 2484 // to 32 bits. Insert an assert[sz]ext to capture this, then 2485 // truncate to the right size. 2486 switch (VA.getLocInfo()) { 2487 default: llvm_unreachable("Unknown loc info!"); 2488 case CCValAssign::Full: break; 2489 case CCValAssign::BCvt: 2490 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2491 break; 2492 case CCValAssign::SExt: 2493 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2494 DAG.getValueType(VA.getValVT())); 2495 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2496 break; 2497 case CCValAssign::ZExt: 2498 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2499 DAG.getValueType(VA.getValVT())); 2500 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2501 break; 2502 } 2503 2504 InVals.push_back(ArgValue); 2505 2506 } else { // VA.isRegLoc() 2507 2508 // sanity check 2509 assert(VA.isMemLoc()); 2510 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2511 2512 int index = ArgLocs[i].getValNo(); 2513 2514 // Some Ins[] entries become multiple ArgLoc[] entries. 2515 // Process them only once. 2516 if (index != lastInsIndex) 2517 { 2518 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2519 // FIXME: For now, all byval parameter objects are marked mutable. 2520 // This can be changed with more analysis. 2521 // In case of tail call optimization mark all arguments mutable. 2522 // Since they could be overwritten by lowering of arguments in case of 2523 // a tail call. 2524 if (Flags.isByVal()) { 2525 unsigned VARegSize, VARegSaveSize; 2526 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2527 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2528 unsigned Bytes = Flags.getByValSize() - VARegSize; 2529 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2530 int FI = MFI->CreateFixedObject(Bytes, 2531 VA.getLocMemOffset(), false); 2532 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2533 } else { 2534 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2535 VA.getLocMemOffset(), true); 2536 2537 // Create load nodes to retrieve arguments from the stack. 2538 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2539 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2540 MachinePointerInfo::getFixedStack(FI), 2541 false, false, 0)); 2542 } 2543 lastInsIndex = index; 2544 } 2545 } 2546 } 2547 2548 // varargs 2549 if (isVarArg) 2550 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2551 2552 return Chain; 2553} 2554 2555/// isFloatingPointZero - Return true if this is +0.0. 2556static bool isFloatingPointZero(SDValue Op) { 2557 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2558 return CFP->getValueAPF().isPosZero(); 2559 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2560 // Maybe this has already been legalized into the constant pool? 2561 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2562 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2563 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2564 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2565 return CFP->getValueAPF().isPosZero(); 2566 } 2567 } 2568 return false; 2569} 2570 2571/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2572/// the given operands. 2573SDValue 2574ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2575 SDValue &ARMcc, SelectionDAG &DAG, 2576 DebugLoc dl) const { 2577 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2578 unsigned C = RHSC->getZExtValue(); 2579 if (!isLegalICmpImmediate(C)) { 2580 // Constant does not fit, try adjusting it by one? 2581 switch (CC) { 2582 default: break; 2583 case ISD::SETLT: 2584 case ISD::SETGE: 2585 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2586 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2587 RHS = DAG.getConstant(C-1, MVT::i32); 2588 } 2589 break; 2590 case ISD::SETULT: 2591 case ISD::SETUGE: 2592 if (C != 0 && isLegalICmpImmediate(C-1)) { 2593 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2594 RHS = DAG.getConstant(C-1, MVT::i32); 2595 } 2596 break; 2597 case ISD::SETLE: 2598 case ISD::SETGT: 2599 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2600 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2601 RHS = DAG.getConstant(C+1, MVT::i32); 2602 } 2603 break; 2604 case ISD::SETULE: 2605 case ISD::SETUGT: 2606 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2607 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2608 RHS = DAG.getConstant(C+1, MVT::i32); 2609 } 2610 break; 2611 } 2612 } 2613 } 2614 2615 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2616 ARMISD::NodeType CompareType; 2617 switch (CondCode) { 2618 default: 2619 CompareType = ARMISD::CMP; 2620 break; 2621 case ARMCC::EQ: 2622 case ARMCC::NE: 2623 // Uses only Z Flag 2624 CompareType = ARMISD::CMPZ; 2625 break; 2626 } 2627 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2628 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2629} 2630 2631/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
2632SDValue 2633ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2634 DebugLoc dl) const { 2635 SDValue Cmp; 2636 if (!isFloatingPointZero(RHS)) 2637 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2638 else 2639 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2640 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2641} 2642 2643/// duplicateCmp - Glue values can have only one use, so this function 2644/// duplicates a comparison node. 2645SDValue 2646ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2647 unsigned Opc = Cmp.getOpcode(); 2648 DebugLoc DL = Cmp.getDebugLoc(); 2649 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2650 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2651 2652 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2653 Cmp = Cmp.getOperand(0); 2654 Opc = Cmp.getOpcode(); 2655 if (Opc == ARMISD::CMPFP) 2656 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2657 else { 2658 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2659 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2660 } 2661 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2662} 2663 2664SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2665 SDValue Cond = Op.getOperand(0); 2666 SDValue SelectTrue = Op.getOperand(1); 2667 SDValue SelectFalse = Op.getOperand(2); 2668 DebugLoc dl = Op.getDebugLoc(); 2669 2670 // Convert: 2671 // 2672 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2673 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2674 // 2675 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2676 const ConstantSDNode *CMOVTrue = 2677 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2678 const ConstantSDNode *CMOVFalse = 2679 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2680 2681 if (CMOVTrue && CMOVFalse) { 2682 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2683 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2684 2685 SDValue True; 2686 SDValue False; 2687 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2688 True = SelectTrue; 2689 False = SelectFalse; 2690 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2691 True = SelectFalse; 2692 False = SelectTrue; 2693 } 2694 2695 if (True.getNode() && False.getNode()) { 2696 EVT VT = Cond.getValueType(); 2697 SDValue ARMcc = Cond.getOperand(2); 2698 SDValue CCR = Cond.getOperand(3); 2699 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2700 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2701 } 2702 } 2703 } 2704 2705 return DAG.getSelectCC(dl, Cond, 2706 DAG.getConstant(0, Cond.getValueType()), 2707 SelectTrue, SelectFalse, ISD::SETNE); 2708} 2709 2710SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2711 EVT VT = Op.getValueType(); 2712 SDValue LHS = Op.getOperand(0); 2713 SDValue RHS = Op.getOperand(1); 2714 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2715 SDValue TrueVal = Op.getOperand(2); 2716 SDValue FalseVal = Op.getOperand(3); 2717 DebugLoc dl = Op.getDebugLoc(); 2718 2719 if (LHS.getValueType() == MVT::i32) { 2720 SDValue ARMcc; 2721 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2722 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2723 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2724 } 2725 2726 ARMCC::CondCodes CondCode, CondCode2; 2727 FPCCToARMCC(CC, CondCode, CondCode2); 2728 2729 SDValue 
ARMcc = DAG.getConstant(CondCode, MVT::i32); 2730 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2731 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2732 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2733 ARMcc, CCR, Cmp); 2734 if (CondCode2 != ARMCC::AL) { 2735 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2736 // FIXME: Needs another CMP because flag can have but one use. 2737 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2738 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2739 Result, TrueVal, ARMcc2, CCR, Cmp2); 2740 } 2741 return Result; 2742} 2743 2744/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2745/// to morph to an integer compare sequence. 2746static bool canChangeToInt(SDValue Op, bool &SeenZero, 2747 const ARMSubtarget *Subtarget) { 2748 SDNode *N = Op.getNode(); 2749 if (!N->hasOneUse()) 2750 // Otherwise it requires moving the value from fp to integer registers. 2751 return false; 2752 if (!N->getNumValues()) 2753 return false; 2754 EVT VT = Op.getValueType(); 2755 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2756 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2757 // vmrs are very slow, e.g. cortex-a8. 2758 return false; 2759 2760 if (isFloatingPointZero(Op)) { 2761 SeenZero = true; 2762 return true; 2763 } 2764 return ISD::isNormalLoad(N); 2765} 2766 2767static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2768 if (isFloatingPointZero(Op)) 2769 return DAG.getConstant(0, MVT::i32); 2770 2771 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2772 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2773 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2774 Ld->isVolatile(), Ld->isNonTemporal(), 2775 Ld->getAlignment()); 2776 2777 llvm_unreachable("Unknown VFP cmp argument!"); 2778} 2779 2780static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2781 SDValue &RetVal1, SDValue &RetVal2) { 2782 if (isFloatingPointZero(Op)) { 2783 RetVal1 = DAG.getConstant(0, MVT::i32); 2784 RetVal2 = DAG.getConstant(0, MVT::i32); 2785 return; 2786 } 2787 2788 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2789 SDValue Ptr = Ld->getBasePtr(); 2790 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2791 Ld->getChain(), Ptr, 2792 Ld->getPointerInfo(), 2793 Ld->isVolatile(), Ld->isNonTemporal(), 2794 Ld->getAlignment()); 2795 2796 EVT PtrType = Ptr.getValueType(); 2797 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2798 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2799 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2800 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2801 Ld->getChain(), NewPtr, 2802 Ld->getPointerInfo().getWithOffset(4), 2803 Ld->isVolatile(), Ld->isNonTemporal(), 2804 NewAlign); 2805 return; 2806 } 2807 2808 llvm_unreachable("Unknown VFP cmp argument!"); 2809} 2810 2811/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2812/// f32 and even f64 comparisons to integer ones. 
2813SDValue 2814ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2815 SDValue Chain = Op.getOperand(0); 2816 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2817 SDValue LHS = Op.getOperand(2); 2818 SDValue RHS = Op.getOperand(3); 2819 SDValue Dest = Op.getOperand(4); 2820 DebugLoc dl = Op.getDebugLoc(); 2821 2822 bool SeenZero = false; 2823 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2824 canChangeToInt(RHS, SeenZero, Subtarget) && 2825 // If one of the operand is zero, it's safe to ignore the NaN case since 2826 // we only care about equality comparisons. 2827 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2828 // If unsafe fp math optimization is enabled and there are no other uses of 2829 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2830 // to an integer comparison. 2831 if (CC == ISD::SETOEQ) 2832 CC = ISD::SETEQ; 2833 else if (CC == ISD::SETUNE) 2834 CC = ISD::SETNE; 2835 2836 SDValue ARMcc; 2837 if (LHS.getValueType() == MVT::f32) { 2838 LHS = bitcastf32Toi32(LHS, DAG); 2839 RHS = bitcastf32Toi32(RHS, DAG); 2840 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2841 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2842 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2843 Chain, Dest, ARMcc, CCR, Cmp); 2844 } 2845 2846 SDValue LHS1, LHS2; 2847 SDValue RHS1, RHS2; 2848 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2849 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2850 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2851 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2852 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2853 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2854 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2855 } 2856 2857 return SDValue(); 2858} 2859 2860SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2861 SDValue Chain = Op.getOperand(0); 2862 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2863 SDValue LHS = Op.getOperand(2); 2864 SDValue RHS = Op.getOperand(3); 2865 SDValue Dest = Op.getOperand(4); 2866 DebugLoc dl = Op.getDebugLoc(); 2867 2868 if (LHS.getValueType() == MVT::i32) { 2869 SDValue ARMcc; 2870 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2871 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2872 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2873 Chain, Dest, ARMcc, CCR, Cmp); 2874 } 2875 2876 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2877 2878 if (UnsafeFPMath && 2879 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2880 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2881 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2882 if (Result.getNode()) 2883 return Result; 2884 } 2885 2886 ARMCC::CondCodes CondCode, CondCode2; 2887 FPCCToARMCC(CC, CondCode, CondCode2); 2888 2889 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2890 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2891 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2892 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2893 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2894 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2895 if (CondCode2 != ARMCC::AL) { 2896 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2897 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2898 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2899 } 2900 return Res; 2901} 2902 2903SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2904 SDValue 
Chain = Op.getOperand(0); 2905 SDValue Table = Op.getOperand(1); 2906 SDValue Index = Op.getOperand(2); 2907 DebugLoc dl = Op.getDebugLoc(); 2908 2909 EVT PTy = getPointerTy(); 2910 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2911 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2912 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2913 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2914 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2915 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2916 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2917 if (Subtarget->isThumb2()) { 2918 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2919 // which does another jump to the destination. This also makes it easier 2920 // to translate it to TBB / TBH later. 2921 // FIXME: This might not work if the function is extremely large. 2922 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2923 Addr, Op.getOperand(2), JTI, UId); 2924 } 2925 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2926 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2927 MachinePointerInfo::getJumpTable(), 2928 false, false, 0); 2929 Chain = Addr.getValue(1); 2930 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2931 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2932 } else { 2933 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2934 MachinePointerInfo::getJumpTable(), false, false, 0); 2935 Chain = Addr.getValue(1); 2936 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2937 } 2938} 2939 2940static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2941 DebugLoc dl = Op.getDebugLoc(); 2942 unsigned Opc; 2943 2944 switch (Op.getOpcode()) { 2945 default: 2946 assert(0 && "Invalid opcode!"); 2947 case ISD::FP_TO_SINT: 2948 Opc = ARMISD::FTOSI; 2949 break; 2950 case ISD::FP_TO_UINT: 2951 Opc = ARMISD::FTOUI; 2952 break; 2953 } 2954 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2955 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2956} 2957 2958static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2959 EVT VT = Op.getValueType(); 2960 DebugLoc dl = Op.getDebugLoc(); 2961 2962 EVT OperandVT = Op.getOperand(0).getValueType(); 2963 assert(OperandVT == MVT::v4i16 && "Invalid type for custom lowering!"); 2964 if (VT != MVT::v4f32) 2965 return DAG.UnrollVectorOp(Op.getNode()); 2966 2967 unsigned CastOpc; 2968 unsigned Opc; 2969 switch (Op.getOpcode()) { 2970 default: 2971 assert(0 && "Invalid opcode!"); 2972 case ISD::SINT_TO_FP: 2973 CastOpc = ISD::SIGN_EXTEND; 2974 Opc = ISD::SINT_TO_FP; 2975 break; 2976 case ISD::UINT_TO_FP: 2977 CastOpc = ISD::ZERO_EXTEND; 2978 Opc = ISD::UINT_TO_FP; 2979 break; 2980 } 2981 2982 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 2983 return DAG.getNode(Opc, dl, VT, Op); 2984} 2985 2986static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2987 EVT VT = Op.getValueType(); 2988 if (VT.isVector()) 2989 return LowerVectorINT_TO_FP(Op, DAG); 2990 2991 DebugLoc dl = Op.getDebugLoc(); 2992 unsigned Opc; 2993 2994 switch (Op.getOpcode()) { 2995 default: 2996 assert(0 && "Invalid opcode!"); 2997 case ISD::SINT_TO_FP: 2998 Opc = ARMISD::SITOF; 2999 break; 3000 case ISD::UINT_TO_FP: 3001 Opc = ARMISD::UITOF; 3002 break; 3003 } 3004 3005 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3006 return DAG.getNode(Opc, dl, VT, Op); 3007} 3008 3009SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3010 // Implement fcopysign with a fabs and a conditional fneg. 3011 SDValue Tmp0 = Op.getOperand(0); 3012 SDValue Tmp1 = Op.getOperand(1); 3013 DebugLoc dl = Op.getDebugLoc(); 3014 EVT VT = Op.getValueType(); 3015 EVT SrcVT = Tmp1.getValueType(); 3016 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3017 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3018 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3019 3020 if (UseNEON) { 3021 // Use VBSL to copy the sign bit. 3022 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3023 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3024 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3025 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3026 if (VT == MVT::f64) 3027 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3028 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3029 DAG.getConstant(32, MVT::i32)); 3030 else /*if (VT == MVT::f32)*/ 3031 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3032 if (SrcVT == MVT::f32) { 3033 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3034 if (VT == MVT::f64) 3035 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3036 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3037 DAG.getConstant(32, MVT::i32)); 3038 } else if (VT == MVT::f32) 3039 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3040 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3041 DAG.getConstant(32, MVT::i32)); 3042 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3043 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3044 3045 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3046 MVT::i32); 3047 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3048 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3049 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3050 3051 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3052 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3053 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3054 if (VT == MVT::f32) { 3055 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3056 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3057 DAG.getConstant(0, MVT::i32)); 3058 } else { 3059 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3060 } 3061 3062 return Res; 3063 } 3064 3065 // Bitcast operand 1 to i32. 3066 if (SrcVT == MVT::f64) 3067 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3068 &Tmp1, 1).getValue(1); 3069 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3070 3071 // Or in the signbit with integer operations. 3072 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3073 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3074 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3075 if (VT == MVT::f32) { 3076 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3077 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3078 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3079 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3080 } 3081 3082 // f64: Or the high part with signbit and then combine two parts. 
3083 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3084 &Tmp0, 1); 3085 SDValue Lo = Tmp0.getValue(0); 3086 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3087 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3088 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3089} 3090 3091SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3092 MachineFunction &MF = DAG.getMachineFunction(); 3093 MachineFrameInfo *MFI = MF.getFrameInfo(); 3094 MFI->setReturnAddressIsTaken(true); 3095 3096 EVT VT = Op.getValueType(); 3097 DebugLoc dl = Op.getDebugLoc(); 3098 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3099 if (Depth) { 3100 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3101 SDValue Offset = DAG.getConstant(4, MVT::i32); 3102 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3103 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3104 MachinePointerInfo(), false, false, 0); 3105 } 3106 3107 // Return LR, which contains the return address. Mark it an implicit live-in. 3108 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3109 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3110} 3111 3112SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3113 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3114 MFI->setFrameAddressIsTaken(true); 3115 3116 EVT VT = Op.getValueType(); 3117 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3118 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3119 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3120 ? ARM::R7 : ARM::R11; 3121 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3122 while (Depth--) 3123 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3124 MachinePointerInfo(), 3125 false, false, 0); 3126 return FrameAddr; 3127} 3128 3129/// ExpandBITCAST - If the target supports VFP, this function is called to 3130/// expand a bit convert where either the source or destination type is i64 to 3131/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3132/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3133/// vectors), since the legalizer won't know what to do with that. 3134static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3135 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3136 DebugLoc dl = N->getDebugLoc(); 3137 SDValue Op = N->getOperand(0); 3138 3139 // This function is only supposed to be called for i64 types, either as the 3140 // source or destination of the bit convert. 3141 EVT SrcVT = Op.getValueType(); 3142 EVT DstVT = N->getValueType(0); 3143 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3144 "ExpandBITCAST called for non-i64 type"); 3145 3146 // Turn i64->f64 into VMOVDRR. 3147 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3148 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3149 DAG.getConstant(0, MVT::i32)); 3150 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3151 DAG.getConstant(1, MVT::i32)); 3152 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3153 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3154 } 3155 3156 // Turn f64->i64 into VMOVRRD. 3157 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3158 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3159 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3160 // Merge the pieces into a single i64 value. 
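// Informal sketch of the node produced here, assuming a VFP-legal f64 source:
//
//   (i64 (bitcast f64 X))  -->  (BUILD_PAIR (VMOVRRD X):0, (VMOVRRD X):1)
//
// i.e. VMOVRRD moves the double into a GPR pair and BUILD_PAIR reassembles
// the two i32 halves into the (illegal) i64 result for the legalizer.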
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction. However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which take a
/// 2 x i32 value to shift plus a shift amount and return two i32 values.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which takes a 2 x i32 value to
/// shift plus a shift amount and returns two i32 values.
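// Informal sketch of the selection logic shared by the *_PARTS lowerings,
// written as plain C for a logical right shift of the 64-bit pair {Lo, Hi}
// by Amt (variable names here are illustrative only):
//
//   FalseVal = (Lo >> Amt) | (Hi << (32 - Amt));      // correct when Amt < 32
//   TrueVal  = Hi >> (Amt - 32);                      // correct when Amt >= 32
//   NewLo    = (Amt - 32 >= 0) ? TrueVal : FalseVal;  // selected via CMOV
//   NewHi    = Hi >> Amt;
//
// SRA_PARTS uses arithmetic shifts of Hi, and SHL_PARTS below is the mirror
// image of the same idea.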
3220SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3221 SelectionDAG &DAG) const { 3222 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3223 EVT VT = Op.getValueType(); 3224 unsigned VTBits = VT.getSizeInBits(); 3225 DebugLoc dl = Op.getDebugLoc(); 3226 SDValue ShOpLo = Op.getOperand(0); 3227 SDValue ShOpHi = Op.getOperand(1); 3228 SDValue ShAmt = Op.getOperand(2); 3229 SDValue ARMcc; 3230 3231 assert(Op.getOpcode() == ISD::SHL_PARTS); 3232 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3233 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3234 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3235 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3236 DAG.getConstant(VTBits, MVT::i32)); 3237 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3238 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3239 3240 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3241 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3242 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3243 ARMcc, DAG, dl); 3244 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3245 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3246 CCR, Cmp); 3247 3248 SDValue Ops[2] = { Lo, Hi }; 3249 return DAG.getMergeValues(Ops, 2, dl); 3250} 3251 3252SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3253 SelectionDAG &DAG) const { 3254 // The rounding mode is in bits 23:22 of the FPSCR. 3255 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3256 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3257 // so that the shift + and get folded into a bitfield extract. 3258 DebugLoc dl = Op.getDebugLoc(); 3259 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3260 DAG.getConstant(Intrinsic::arm_get_fpscr, 3261 MVT::i32)); 3262 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3263 DAG.getConstant(1U << 22, MVT::i32)); 3264 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3265 DAG.getConstant(22, MVT::i32)); 3266 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3267 DAG.getConstant(3, MVT::i32)); 3268} 3269 3270static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3271 const ARMSubtarget *ST) { 3272 EVT VT = N->getValueType(0); 3273 DebugLoc dl = N->getDebugLoc(); 3274 3275 if (!ST->hasV6T2Ops()) 3276 return SDValue(); 3277 3278 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3279 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3280} 3281 3282static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3283 const ARMSubtarget *ST) { 3284 EVT VT = N->getValueType(0); 3285 DebugLoc dl = N->getDebugLoc(); 3286 3287 if (!VT.isVector()) 3288 return SDValue(); 3289 3290 // Lower vector shifts on NEON to use VSHL. 3291 assert(ST->hasNEON() && "unexpected vector shift"); 3292 3293 // Left shifts translate directly to the vshiftu intrinsic. 3294 if (N->getOpcode() == ISD::SHL) 3295 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3296 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3297 N->getOperand(0), N->getOperand(1)); 3298 3299 assert((N->getOpcode() == ISD::SRA || 3300 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3301 3302 // NEON uses the same intrinsics for both left and right shifts. For 3303 // right shifts, the shift amounts are negative, so negate the vector of 3304 // shift amounts. 
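// Illustrative example: an SRL of a <4 x i32> vector by <1, 2, 3, 4> is
// emitted below as
//
//   llvm.arm.neon.vshiftu(X, <4 x i32> <-1, -2, -3, -4>)
//
// where the negated counts are formed as (zero vector) - (shift amounts);
// the signed intrinsic (vshifts) is used for SRA instead.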
3305 EVT ShiftVT = N->getOperand(1).getValueType(); 3306 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3307 getZeroVector(ShiftVT, DAG, dl), 3308 N->getOperand(1)); 3309 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3310 Intrinsic::arm_neon_vshifts : 3311 Intrinsic::arm_neon_vshiftu); 3312 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3313 DAG.getConstant(vshiftInt, MVT::i32), 3314 N->getOperand(0), NegatedCount); 3315} 3316 3317static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3318 const ARMSubtarget *ST) { 3319 EVT VT = N->getValueType(0); 3320 DebugLoc dl = N->getDebugLoc(); 3321 3322 // We can get here for a node like i32 = ISD::SHL i32, i64 3323 if (VT != MVT::i64) 3324 return SDValue(); 3325 3326 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3327 "Unknown shift to lower!"); 3328 3329 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3330 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3331 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3332 return SDValue(); 3333 3334 // If we are in thumb mode, we don't have RRX. 3335 if (ST->isThumb1Only()) return SDValue(); 3336 3337 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3338 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3339 DAG.getConstant(0, MVT::i32)); 3340 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3341 DAG.getConstant(1, MVT::i32)); 3342 3343 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3344 // captures the result into a carry flag. 3345 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3346 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3347 3348 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3349 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3350 3351 // Merge the pieces into a single i64 value. 3352 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3353} 3354 3355static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3356 SDValue TmpOp0, TmpOp1; 3357 bool Invert = false; 3358 bool Swap = false; 3359 unsigned Opc = 0; 3360 3361 SDValue Op0 = Op.getOperand(0); 3362 SDValue Op1 = Op.getOperand(1); 3363 SDValue CC = Op.getOperand(2); 3364 EVT VT = Op.getValueType(); 3365 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3366 DebugLoc dl = Op.getDebugLoc(); 3367 3368 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3369 switch (SetCCOpcode) { 3370 default: llvm_unreachable("Illegal FP comparison"); break; 3371 case ISD::SETUNE: 3372 case ISD::SETNE: Invert = true; // Fallthrough 3373 case ISD::SETOEQ: 3374 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3375 case ISD::SETOLT: 3376 case ISD::SETLT: Swap = true; // Fallthrough 3377 case ISD::SETOGT: 3378 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3379 case ISD::SETOLE: 3380 case ISD::SETLE: Swap = true; // Fallthrough 3381 case ISD::SETOGE: 3382 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3383 case ISD::SETUGE: Swap = true; // Fallthrough 3384 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3385 case ISD::SETUGT: Swap = true; // Fallthrough 3386 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3387 case ISD::SETUEQ: Invert = true; // Fallthrough 3388 case ISD::SETONE: 3389 // Expand this to (OLT | OGT). 
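// Illustrative note: NEON has no single predicate for "ordered and unequal",
// so SETONE is built as VCGT(Op1, Op0) | VCGT(Op0, Op1); both halves are
// false exactly when the operands are equal or unordered. SETUEQ reuses the
// same expansion and then inverts the result (Invert was set above).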
3390 TmpOp0 = Op0; 3391 TmpOp1 = Op1; 3392 Opc = ISD::OR; 3393 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3394 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3395 break; 3396 case ISD::SETUO: Invert = true; // Fallthrough 3397 case ISD::SETO: 3398 // Expand this to (OLT | OGE). 3399 TmpOp0 = Op0; 3400 TmpOp1 = Op1; 3401 Opc = ISD::OR; 3402 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3403 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3404 break; 3405 } 3406 } else { 3407 // Integer comparisons. 3408 switch (SetCCOpcode) { 3409 default: llvm_unreachable("Illegal integer comparison"); break; 3410 case ISD::SETNE: Invert = true; 3411 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3412 case ISD::SETLT: Swap = true; 3413 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3414 case ISD::SETLE: Swap = true; 3415 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3416 case ISD::SETULT: Swap = true; 3417 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3418 case ISD::SETULE: Swap = true; 3419 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3420 } 3421 3422 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3423 if (Opc == ARMISD::VCEQ) { 3424 3425 SDValue AndOp; 3426 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3427 AndOp = Op0; 3428 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3429 AndOp = Op1; 3430 3431 // Ignore bitconvert. 3432 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3433 AndOp = AndOp.getOperand(0); 3434 3435 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3436 Opc = ARMISD::VTST; 3437 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3438 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3439 Invert = !Invert; 3440 } 3441 } 3442 } 3443 3444 if (Swap) 3445 std::swap(Op0, Op1); 3446 3447 // If one of the operands is a constant vector zero, attempt to fold the 3448 // comparison to a specialized compare-against-zero form. 3449 SDValue SingleOp; 3450 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3451 SingleOp = Op0; 3452 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3453 if (Opc == ARMISD::VCGE) 3454 Opc = ARMISD::VCLEZ; 3455 else if (Opc == ARMISD::VCGT) 3456 Opc = ARMISD::VCLTZ; 3457 SingleOp = Op1; 3458 } 3459 3460 SDValue Result; 3461 if (SingleOp.getNode()) { 3462 switch (Opc) { 3463 case ARMISD::VCEQ: 3464 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3465 case ARMISD::VCGE: 3466 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3467 case ARMISD::VCLEZ: 3468 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3469 case ARMISD::VCGT: 3470 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3471 case ARMISD::VCLTZ: 3472 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3473 default: 3474 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3475 } 3476 } else { 3477 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3478 } 3479 3480 if (Invert) 3481 Result = DAG.getNOT(dl, Result, VT); 3482 3483 return Result; 3484} 3485 3486/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3487/// valid vector constant for a NEON instruction with a "modified immediate" 3488/// operand (e.g., VMOV). If so, return the encoded value. 
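// A few concrete encodings, for illustration (they follow the cases in the
// function below):
//   v16i8 splat of 0x41               -> Op=0, Cmode=1110, Imm=0x41
//   v4i32 splat of 0x00ff0000         -> Op=x, Cmode=010x, Imm=0xff
//   v4i32 splat of 0x000037ff         -> Op=x, Cmode=1100, Imm=0x37
//                                        (an "nnff" form; rejected for
//                                         OtherModImm, i.e. VORR/VBIC)
//   v2i64 splat of 0x00ff00ff00ff00ff -> Op=1, Cmode=1110, Imm=0b01010101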
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 EVT &VT, bool is128Bits, NEONModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8. However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK. Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      SplatBits |= 0xff;
      break;
    }

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      SplatBits |= 0xffff;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32. A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat. But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
3593 uint64_t BitMask = 0xff; 3594 uint64_t Val = 0; 3595 unsigned ImmMask = 1; 3596 Imm = 0; 3597 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3598 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3599 Val |= BitMask; 3600 Imm |= ImmMask; 3601 } else if ((SplatBits & BitMask) != 0) { 3602 return SDValue(); 3603 } 3604 BitMask <<= 8; 3605 ImmMask <<= 1; 3606 } 3607 // Op=1, Cmode=1110. 3608 OpCmode = 0x1e; 3609 SplatBits = Val; 3610 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3611 break; 3612 } 3613 3614 default: 3615 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3616 return SDValue(); 3617 } 3618 3619 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3620 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3621} 3622 3623static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3624 bool &ReverseVEXT, unsigned &Imm) { 3625 unsigned NumElts = VT.getVectorNumElements(); 3626 ReverseVEXT = false; 3627 3628 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3629 if (M[0] < 0) 3630 return false; 3631 3632 Imm = M[0]; 3633 3634 // If this is a VEXT shuffle, the immediate value is the index of the first 3635 // element. The other shuffle indices must be the successive elements after 3636 // the first one. 3637 unsigned ExpectedElt = Imm; 3638 for (unsigned i = 1; i < NumElts; ++i) { 3639 // Increment the expected index. If it wraps around, it may still be 3640 // a VEXT but the source vectors must be swapped. 3641 ExpectedElt += 1; 3642 if (ExpectedElt == NumElts * 2) { 3643 ExpectedElt = 0; 3644 ReverseVEXT = true; 3645 } 3646 3647 if (M[i] < 0) continue; // ignore UNDEF indices 3648 if (ExpectedElt != static_cast<unsigned>(M[i])) 3649 return false; 3650 } 3651 3652 // Adjust the index value if the source operands will be swapped. 3653 if (ReverseVEXT) 3654 Imm -= NumElts; 3655 3656 return true; 3657} 3658 3659/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3660/// instruction with the specified blocksize. (The order of the elements 3661/// within each block of the vector is reversed.) 3662static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3663 unsigned BlockSize) { 3664 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3665 "Only possible block sizes for VREV are: 16, 32, 64"); 3666 3667 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3668 if (EltSz == 64) 3669 return false; 3670 3671 unsigned NumElts = VT.getVectorNumElements(); 3672 unsigned BlockElts = M[0] + 1; 3673 // If the first shuffle index is UNDEF, be optimistic. 3674 if (M[0] < 0) 3675 BlockElts = BlockSize / EltSz; 3676 3677 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3678 return false; 3679 3680 for (unsigned i = 0; i < NumElts; ++i) { 3681 if (M[i] < 0) continue; // ignore UNDEF indices 3682 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3683 return false; 3684 } 3685 3686 return true; 3687} 3688 3689static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3690 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3691 // range, then 0 is placed into the resulting vector. So pretty much any mask 3692 // of 8 elements can work here. 
3693 return VT == MVT::v8i8 && M.size() == 8; 3694} 3695 3696static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3697 unsigned &WhichResult) { 3698 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3699 if (EltSz == 64) 3700 return false; 3701 3702 unsigned NumElts = VT.getVectorNumElements(); 3703 WhichResult = (M[0] == 0 ? 0 : 1); 3704 for (unsigned i = 0; i < NumElts; i += 2) { 3705 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3706 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3707 return false; 3708 } 3709 return true; 3710} 3711 3712/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3713/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3714/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3715static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3716 unsigned &WhichResult) { 3717 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3718 if (EltSz == 64) 3719 return false; 3720 3721 unsigned NumElts = VT.getVectorNumElements(); 3722 WhichResult = (M[0] == 0 ? 0 : 1); 3723 for (unsigned i = 0; i < NumElts; i += 2) { 3724 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3725 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3726 return false; 3727 } 3728 return true; 3729} 3730 3731static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3732 unsigned &WhichResult) { 3733 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3734 if (EltSz == 64) 3735 return false; 3736 3737 unsigned NumElts = VT.getVectorNumElements(); 3738 WhichResult = (M[0] == 0 ? 0 : 1); 3739 for (unsigned i = 0; i != NumElts; ++i) { 3740 if (M[i] < 0) continue; // ignore UNDEF indices 3741 if ((unsigned) M[i] != 2 * i + WhichResult) 3742 return false; 3743 } 3744 3745 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3746 if (VT.is64BitVector() && EltSz == 32) 3747 return false; 3748 3749 return true; 3750} 3751 3752/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3753/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3754/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3755static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3756 unsigned &WhichResult) { 3757 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3758 if (EltSz == 64) 3759 return false; 3760 3761 unsigned Half = VT.getVectorNumElements() / 2; 3762 WhichResult = (M[0] == 0 ? 0 : 1); 3763 for (unsigned j = 0; j != 2; ++j) { 3764 unsigned Idx = WhichResult; 3765 for (unsigned i = 0; i != Half; ++i) { 3766 int MIdx = M[i + j * Half]; 3767 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3768 return false; 3769 Idx += 2; 3770 } 3771 } 3772 3773 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3774 if (VT.is64BitVector() && EltSz == 32) 3775 return false; 3776 3777 return true; 3778} 3779 3780static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3781 unsigned &WhichResult) { 3782 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3783 if (EltSz == 64) 3784 return false; 3785 3786 unsigned NumElts = VT.getVectorNumElements(); 3787 WhichResult = (M[0] == 0 ? 0 : 1); 3788 unsigned Idx = WhichResult * NumElts / 2; 3789 for (unsigned i = 0; i != NumElts; i += 2) { 3790 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3791 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3792 return false; 3793 Idx += 1; 3794 } 3795 3796 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
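// Illustrative example: for v8i8 the two VZIP results correspond to the masks
//   <0, 8, 1, 9, 2, 10, 3, 11>   (WhichResult == 0)
//   <4, 12, 5, 13, 6, 14, 7, 15> (WhichResult == 1)
// With 32-bit elements in a 64-bit vector the interleave is the same
// permutation VTRN.32 performs, so that case is rejected just below and left
// to the VTRN matcher.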
3797 if (VT.is64BitVector() && EltSz == 32) 3798 return false; 3799 3800 return true; 3801} 3802 3803/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3804/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3805/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3806static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3807 unsigned &WhichResult) { 3808 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3809 if (EltSz == 64) 3810 return false; 3811 3812 unsigned NumElts = VT.getVectorNumElements(); 3813 WhichResult = (M[0] == 0 ? 0 : 1); 3814 unsigned Idx = WhichResult * NumElts / 2; 3815 for (unsigned i = 0; i != NumElts; i += 2) { 3816 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3817 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3818 return false; 3819 Idx += 1; 3820 } 3821 3822 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3823 if (VT.is64BitVector() && EltSz == 32) 3824 return false; 3825 3826 return true; 3827} 3828 3829// If N is an integer constant that can be moved into a register in one 3830// instruction, return an SDValue of such a constant (will become a MOV 3831// instruction). Otherwise return null. 3832static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3833 const ARMSubtarget *ST, DebugLoc dl) { 3834 uint64_t Val; 3835 if (!isa<ConstantSDNode>(N)) 3836 return SDValue(); 3837 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3838 3839 if (ST->isThumb1Only()) { 3840 if (Val <= 255 || ~Val <= 255) 3841 return DAG.getConstant(Val, MVT::i32); 3842 } else { 3843 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3844 return DAG.getConstant(Val, MVT::i32); 3845 } 3846 return SDValue(); 3847} 3848 3849// If this is a case we can't handle, return null and let the default 3850// expansion code take care of it. 3851SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3852 const ARMSubtarget *ST) const { 3853 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3854 DebugLoc dl = Op.getDebugLoc(); 3855 EVT VT = Op.getValueType(); 3856 3857 APInt SplatBits, SplatUndef; 3858 unsigned SplatBitSize; 3859 bool HasAnyUndefs; 3860 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3861 if (SplatBitSize <= 64) { 3862 // Check if an immediate VMOV works. 3863 EVT VmovVT; 3864 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3865 SplatUndef.getZExtValue(), SplatBitSize, 3866 DAG, VmovVT, VT.is128BitVector(), 3867 VMOVModImm); 3868 if (Val.getNode()) { 3869 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3870 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3871 } 3872 3873 // Try an immediate VMVN. 3874 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3875 ((1LL << SplatBitSize) - 1)); 3876 Val = isNEONModifiedImm(NegatedImm, 3877 SplatUndef.getZExtValue(), SplatBitSize, 3878 DAG, VmovVT, VT.is128BitVector(), 3879 VMVNModImm); 3880 if (Val.getNode()) { 3881 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3882 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3883 } 3884 } 3885 } 3886 3887 // Scan through the operands to see if only one value is used. 
3888 unsigned NumElts = VT.getVectorNumElements(); 3889 bool isOnlyLowElement = true; 3890 bool usesOnlyOneValue = true; 3891 bool isConstant = true; 3892 SDValue Value; 3893 for (unsigned i = 0; i < NumElts; ++i) { 3894 SDValue V = Op.getOperand(i); 3895 if (V.getOpcode() == ISD::UNDEF) 3896 continue; 3897 if (i > 0) 3898 isOnlyLowElement = false; 3899 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3900 isConstant = false; 3901 3902 if (!Value.getNode()) 3903 Value = V; 3904 else if (V != Value) 3905 usesOnlyOneValue = false; 3906 } 3907 3908 if (!Value.getNode()) 3909 return DAG.getUNDEF(VT); 3910 3911 if (isOnlyLowElement) 3912 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3913 3914 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3915 3916 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3917 // i32 and try again. 3918 if (usesOnlyOneValue && EltSize <= 32) { 3919 if (!isConstant) 3920 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3921 if (VT.getVectorElementType().isFloatingPoint()) { 3922 SmallVector<SDValue, 8> Ops; 3923 for (unsigned i = 0; i < NumElts; ++i) 3924 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3925 Op.getOperand(i))); 3926 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3927 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3928 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3929 if (Val.getNode()) 3930 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3931 } 3932 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3933 if (Val.getNode()) 3934 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3935 } 3936 3937 // If all elements are constants and the case above didn't get hit, fall back 3938 // to the default expansion, which will generate a load from the constant 3939 // pool. 3940 if (isConstant) 3941 return SDValue(); 3942 3943 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 3944 if (NumElts >= 4) { 3945 SDValue shuffle = ReconstructShuffle(Op, DAG); 3946 if (shuffle != SDValue()) 3947 return shuffle; 3948 } 3949 3950 // Vectors with 32- or 64-bit elements can be built by directly assigning 3951 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3952 // will be legalized. 3953 if (EltSize >= 32) { 3954 // Do the expansion with floating-point types, since that is what the VFP 3955 // registers are defined to use, and since i64 is not legal. 3956 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3957 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3958 SmallVector<SDValue, 8> Ops; 3959 for (unsigned i = 0; i < NumElts; ++i) 3960 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3961 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3962 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3963 } 3964 3965 return SDValue(); 3966} 3967 3968// Gather data to see if the operation can be modelled as a 3969// shuffle in combination with VEXTs. 
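// Illustrative example (informal): a v4i16 BUILD_VECTOR whose operands are
//
//   extractelt(A, 1), extractelt(A, 2), extractelt(A, 3), extractelt(B, 0)
//
// for two v4i16 sources A and B can be re-modelled as
// vector_shuffle<1,2,3,4>(A, B), which the shuffle lowering then matches
// (in this case as a VEXT with offset 1).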
3970SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 3971 SelectionDAG &DAG) const { 3972 DebugLoc dl = Op.getDebugLoc(); 3973 EVT VT = Op.getValueType(); 3974 unsigned NumElts = VT.getVectorNumElements(); 3975 3976 SmallVector<SDValue, 2> SourceVecs; 3977 SmallVector<unsigned, 2> MinElts; 3978 SmallVector<unsigned, 2> MaxElts; 3979 3980 for (unsigned i = 0; i < NumElts; ++i) { 3981 SDValue V = Op.getOperand(i); 3982 if (V.getOpcode() == ISD::UNDEF) 3983 continue; 3984 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 3985 // A shuffle can only come from building a vector from various 3986 // elements of other vectors. 3987 return SDValue(); 3988 } 3989 3990 // Record this extraction against the appropriate vector if possible... 3991 SDValue SourceVec = V.getOperand(0); 3992 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 3993 bool FoundSource = false; 3994 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 3995 if (SourceVecs[j] == SourceVec) { 3996 if (MinElts[j] > EltNo) 3997 MinElts[j] = EltNo; 3998 if (MaxElts[j] < EltNo) 3999 MaxElts[j] = EltNo; 4000 FoundSource = true; 4001 break; 4002 } 4003 } 4004 4005 // Or record a new source if not... 4006 if (!FoundSource) { 4007 SourceVecs.push_back(SourceVec); 4008 MinElts.push_back(EltNo); 4009 MaxElts.push_back(EltNo); 4010 } 4011 } 4012 4013 // Currently only do something sane when at most two source vectors 4014 // involved. 4015 if (SourceVecs.size() > 2) 4016 return SDValue(); 4017 4018 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4019 int VEXTOffsets[2] = {0, 0}; 4020 4021 // This loop extracts the usage patterns of the source vectors 4022 // and prepares appropriate SDValues for a shuffle if possible. 4023 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4024 if (SourceVecs[i].getValueType() == VT) { 4025 // No VEXT necessary 4026 ShuffleSrcs[i] = SourceVecs[i]; 4027 VEXTOffsets[i] = 0; 4028 continue; 4029 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4030 // It probably isn't worth padding out a smaller vector just to 4031 // break it down again in a shuffle. 4032 return SDValue(); 4033 } 4034 4035 // Since only 64-bit and 128-bit vectors are legal on ARM and 4036 // we've eliminated the other cases... 
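// ...namely, the source has exactly twice as many elements as the result
// (e.g. the result is a v4i16 built out of lanes of a v8i16), which the
// assert below checks. Illustrative example: if the lanes used from such a
// source are 3..6, the span fits in four elements but straddles the two
// halves, so the code further down emits a real VEXT (offset 3) of the
// source's halves rather than a plain EXTRACT_SUBVECTOR.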
4037 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4038 "unexpected vector sizes in ReconstructShuffle"); 4039 4040 if (MaxElts[i] - MinElts[i] >= NumElts) { 4041 // Span too large for a VEXT to cope 4042 return SDValue(); 4043 } 4044 4045 if (MinElts[i] >= NumElts) { 4046 // The extraction can just take the second half 4047 VEXTOffsets[i] = NumElts; 4048 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4049 SourceVecs[i], 4050 DAG.getIntPtrConstant(NumElts)); 4051 } else if (MaxElts[i] < NumElts) { 4052 // The extraction can just take the first half 4053 VEXTOffsets[i] = 0; 4054 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4055 SourceVecs[i], 4056 DAG.getIntPtrConstant(0)); 4057 } else { 4058 // An actual VEXT is needed 4059 VEXTOffsets[i] = MinElts[i]; 4060 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4061 SourceVecs[i], 4062 DAG.getIntPtrConstant(0)); 4063 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4064 SourceVecs[i], 4065 DAG.getIntPtrConstant(NumElts)); 4066 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4067 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4068 } 4069 } 4070 4071 SmallVector<int, 8> Mask; 4072 4073 for (unsigned i = 0; i < NumElts; ++i) { 4074 SDValue Entry = Op.getOperand(i); 4075 if (Entry.getOpcode() == ISD::UNDEF) { 4076 Mask.push_back(-1); 4077 continue; 4078 } 4079 4080 SDValue ExtractVec = Entry.getOperand(0); 4081 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4082 .getOperand(1))->getSExtValue(); 4083 if (ExtractVec == SourceVecs[0]) { 4084 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4085 } else { 4086 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4087 } 4088 } 4089 4090 // Final check before we try to produce nonsense... 4091 if (isShuffleMaskLegal(Mask, VT)) 4092 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4093 &Mask[0]); 4094 4095 return SDValue(); 4096} 4097 4098/// isShuffleMaskLegal - Targets can use this to indicate that they only 4099/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4100/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4101/// are assumed to be legal. 4102bool 4103ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4104 EVT VT) const { 4105 if (VT.getVectorNumElements() == 4 && 4106 (VT.is128BitVector() || VT.is64BitVector())) { 4107 unsigned PFIndexes[4]; 4108 for (unsigned i = 0; i != 4; ++i) { 4109 if (M[i] < 0) 4110 PFIndexes[i] = 8; 4111 else 4112 PFIndexes[i] = M[i]; 4113 } 4114 4115 // Compute the index in the perfect shuffle table. 
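// Illustrative example: each mask index is a base-9 digit (0-7 for a lane,
// 8 for undef), so the 4-element mask <0, 1, 4, 5> maps to
//   0*729 + 1*81 + 4*9 + 5 = 122.
// The top two bits of the table entry hold the cost of the best known
// expansion; the guard below only claims the mask is legal when that cost is
// at most 4.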
4116 unsigned PFTableIndex = 4117 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4118 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4119 unsigned Cost = (PFEntry >> 30); 4120 4121 if (Cost <= 4) 4122 return true; 4123 } 4124 4125 bool ReverseVEXT; 4126 unsigned Imm, WhichResult; 4127 4128 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4129 return (EltSize >= 32 || 4130 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4131 isVREVMask(M, VT, 64) || 4132 isVREVMask(M, VT, 32) || 4133 isVREVMask(M, VT, 16) || 4134 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4135 isVTBLMask(M, VT) || 4136 isVTRNMask(M, VT, WhichResult) || 4137 isVUZPMask(M, VT, WhichResult) || 4138 isVZIPMask(M, VT, WhichResult) || 4139 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4140 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4141 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4142} 4143 4144/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4145/// the specified operations to build the shuffle. 4146static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4147 SDValue RHS, SelectionDAG &DAG, 4148 DebugLoc dl) { 4149 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4150 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4151 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4152 4153 enum { 4154 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4155 OP_VREV, 4156 OP_VDUP0, 4157 OP_VDUP1, 4158 OP_VDUP2, 4159 OP_VDUP3, 4160 OP_VEXT1, 4161 OP_VEXT2, 4162 OP_VEXT3, 4163 OP_VUZPL, // VUZP, left result 4164 OP_VUZPR, // VUZP, right result 4165 OP_VZIPL, // VZIP, left result 4166 OP_VZIPR, // VZIP, right result 4167 OP_VTRNL, // VTRN, left result 4168 OP_VTRNR // VTRN, right result 4169 }; 4170 4171 if (OpNum == OP_COPY) { 4172 if (LHSID == (1*9+2)*9+3) return LHS; 4173 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4174 return RHS; 4175 } 4176 4177 SDValue OpLHS, OpRHS; 4178 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4179 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4180 EVT VT = OpLHS.getValueType(); 4181 4182 switch (OpNum) { 4183 default: llvm_unreachable("Unknown shuffle opcode!"); 4184 case OP_VREV: 4185 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4186 case OP_VDUP0: 4187 case OP_VDUP1: 4188 case OP_VDUP2: 4189 case OP_VDUP3: 4190 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4191 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4192 case OP_VEXT1: 4193 case OP_VEXT2: 4194 case OP_VEXT3: 4195 return DAG.getNode(ARMISD::VEXT, dl, VT, 4196 OpLHS, OpRHS, 4197 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4198 case OP_VUZPL: 4199 case OP_VUZPR: 4200 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4201 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4202 case OP_VZIPL: 4203 case OP_VZIPR: 4204 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4205 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4206 case OP_VTRNL: 4207 case OP_VTRNR: 4208 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4209 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4210 } 4211} 4212 4213static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4214 SmallVectorImpl<int> &ShuffleMask, 4215 SelectionDAG &DAG) { 4216 // Check to see if we can use the VTBL instruction. 
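// Illustrative example: a v8i8 shuffle with mask <0, 8, 1, 9, 2, 10, 3, 11>
// and two defined inputs becomes
//
//   VTBL2 V1, V2, (BUILD_VECTOR 0, 8, 1, 9, 2, 10, 3, 11)
//
// i.e. the shuffle mask itself is materialized as the byte-index operand.
// If V2 is undef, VTBL1 is used instead; any index that then falls out of
// range yields 0, which is harmless because that lane was undefined anyway.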
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc DL = Op.getDebugLoc();

  SmallVector<SDValue, 8> VTBLMask;
  for (SmallVectorImpl<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));

  if (V2.getNode()->getOpcode() == ISD::UNDEF)
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                   &VTBLMask[0], 8));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                 &VTBLMask[0], 8));
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  SmallVector<int, 8> ShuffleMask;

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
  SVN->getMask(ShuffleMask);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize <= 32) {
    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
      int Lane = SVN->getSplatIndex();
      // If this is undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, MVT::i32));
    }

    bool ReverseVEXT;
    unsigned Imm;
    if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
4287 unsigned WhichResult; 4288 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4289 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4290 V1, V2).getValue(WhichResult); 4291 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4292 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4293 V1, V2).getValue(WhichResult); 4294 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4295 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4296 V1, V2).getValue(WhichResult); 4297 4298 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4299 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4300 V1, V1).getValue(WhichResult); 4301 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4302 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4303 V1, V1).getValue(WhichResult); 4304 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4305 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4306 V1, V1).getValue(WhichResult); 4307 } 4308 4309 // If the shuffle is not directly supported and it has 4 elements, use 4310 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4311 unsigned NumElts = VT.getVectorNumElements(); 4312 if (NumElts == 4) { 4313 unsigned PFIndexes[4]; 4314 for (unsigned i = 0; i != 4; ++i) { 4315 if (ShuffleMask[i] < 0) 4316 PFIndexes[i] = 8; 4317 else 4318 PFIndexes[i] = ShuffleMask[i]; 4319 } 4320 4321 // Compute the index in the perfect shuffle table. 4322 unsigned PFTableIndex = 4323 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4324 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4325 unsigned Cost = (PFEntry >> 30); 4326 4327 if (Cost <= 4) 4328 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4329 } 4330 4331 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4332 if (EltSize >= 32) { 4333 // Do the expansion with floating-point types, since that is what the VFP 4334 // registers are defined to use, and since i64 is not legal. 4335 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4336 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4337 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4338 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4339 SmallVector<SDValue, 8> Ops; 4340 for (unsigned i = 0; i < NumElts; ++i) { 4341 if (ShuffleMask[i] < 0) 4342 Ops.push_back(DAG.getUNDEF(EltVT)); 4343 else 4344 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4345 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4346 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4347 MVT::i32))); 4348 } 4349 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4350 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4351 } 4352 4353 if (VT == MVT::v8i8) { 4354 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4355 if (NewOp.getNode()) 4356 return NewOp; 4357 } 4358 4359 return SDValue(); 4360} 4361 4362static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4363 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
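// Illustrative note: for a constant lane of a vector with sub-32-bit
// elements, e.g. (i32 (extractelt v8i8 V, 3)), the node is rewritten below
// to ARMISD::VGETLANEu so that the zero-extending lane move (vmov.u8) can be
// selected. If the lane is not a constant, SDValue() is returned so the
// generic expansion takes over; other cases come back unchanged.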
4364 SDValue Lane = Op.getOperand(1); 4365 if (!isa<ConstantSDNode>(Lane)) 4366 return SDValue(); 4367 4368 SDValue Vec = Op.getOperand(0); 4369 if (Op.getValueType() == MVT::i32 && 4370 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4371 DebugLoc dl = Op.getDebugLoc(); 4372 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4373 } 4374 4375 return Op; 4376} 4377 4378static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4379 // The only time a CONCAT_VECTORS operation can have legal types is when 4380 // two 64-bit vectors are concatenated to a 128-bit vector. 4381 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4382 "unexpected CONCAT_VECTORS"); 4383 DebugLoc dl = Op.getDebugLoc(); 4384 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4385 SDValue Op0 = Op.getOperand(0); 4386 SDValue Op1 = Op.getOperand(1); 4387 if (Op0.getOpcode() != ISD::UNDEF) 4388 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4389 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4390 DAG.getIntPtrConstant(0)); 4391 if (Op1.getOpcode() != ISD::UNDEF) 4392 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4393 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4394 DAG.getIntPtrConstant(1)); 4395 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4396} 4397 4398/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4399/// element has been zero/sign-extended, depending on the isSigned parameter, 4400/// from an integer type half its size. 4401static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4402 bool isSigned) { 4403 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4404 EVT VT = N->getValueType(0); 4405 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4406 SDNode *BVN = N->getOperand(0).getNode(); 4407 if (BVN->getValueType(0) != MVT::v4i32 || 4408 BVN->getOpcode() != ISD::BUILD_VECTOR) 4409 return false; 4410 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4411 unsigned HiElt = 1 - LoElt; 4412 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4413 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4414 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4415 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4416 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4417 return false; 4418 if (isSigned) { 4419 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4420 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4421 return true; 4422 } else { 4423 if (Hi0->isNullValue() && Hi1->isNullValue()) 4424 return true; 4425 } 4426 return false; 4427 } 4428 4429 if (N->getOpcode() != ISD::BUILD_VECTOR) 4430 return false; 4431 4432 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4433 SDNode *Elt = N->getOperand(i).getNode(); 4434 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4435 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4436 unsigned HalfSize = EltSize / 2; 4437 if (isSigned) { 4438 int64_t SExtVal = C->getSExtValue(); 4439 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4440 return false; 4441 } else { 4442 if ((C->getZExtValue() >> HalfSize) != 0) 4443 return false; 4444 } 4445 continue; 4446 } 4447 return false; 4448 } 4449 4450 return true; 4451} 4452 4453/// isSignExtended - Check if a node is a vector value that is sign-extended 4454/// or a constant BUILD_VECTOR with sign-extended elements. 
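/// For example, both (sext (v4i16 X) to v4i32) and a constant v4i32
/// BUILD_VECTOR whose elements all fit in i16 are treated as sign-extended
/// values here, so that LowerMUL below can form a VMULL.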
4455static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4456 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4457 return true; 4458 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4459 return true; 4460 return false; 4461} 4462 4463/// isZeroExtended - Check if a node is a vector value that is zero-extended 4464/// or a constant BUILD_VECTOR with zero-extended elements. 4465static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4466 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4467 return true; 4468 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4469 return true; 4470 return false; 4471} 4472 4473/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4474/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4475static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4476 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4477 return N->getOperand(0); 4478 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4479 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4480 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4481 LD->isNonTemporal(), LD->getAlignment()); 4482 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4483 // have been legalized as a BITCAST from v4i32. 4484 if (N->getOpcode() == ISD::BITCAST) { 4485 SDNode *BVN = N->getOperand(0).getNode(); 4486 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4487 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4488 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4489 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4490 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4491 } 4492 // Construct a new BUILD_VECTOR with elements truncated to half the size. 4493 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4494 EVT VT = N->getValueType(0); 4495 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4496 unsigned NumElts = VT.getVectorNumElements(); 4497 MVT TruncVT = MVT::getIntegerVT(EltSize); 4498 SmallVector<SDValue, 8> Ops; 4499 for (unsigned i = 0; i != NumElts; ++i) { 4500 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4501 const APInt &CInt = C->getAPIntValue(); 4502 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4503 } 4504 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4505 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4506} 4507 4508static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4509 unsigned Opcode = N->getOpcode(); 4510 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4511 SDNode *N0 = N->getOperand(0).getNode(); 4512 SDNode *N1 = N->getOperand(1).getNode(); 4513 return N0->hasOneUse() && N1->hasOneUse() && 4514 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4515 } 4516 return false; 4517} 4518 4519static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4520 unsigned Opcode = N->getOpcode(); 4521 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4522 SDNode *N0 = N->getOperand(0).getNode(); 4523 SDNode *N1 = N->getOperand(1).getNode(); 4524 return N0->hasOneUse() && N1->hasOneUse() && 4525 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4526 } 4527 return false; 4528} 4529 4530static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4531 // Multiplications are only custom-lowered for 128-bit vectors so that 4532 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
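  // For example, (mul (sext v4i16 X to v4i32), (sext v4i16 Y to v4i32)) is
  // turned into (ARMISD::VMULLs X, Y), which should select to a single
  // vmull.s16 instead of two vmovl's and a vmul.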
4533 EVT VT = Op.getValueType(); 4534 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4535 SDNode *N0 = Op.getOperand(0).getNode(); 4536 SDNode *N1 = Op.getOperand(1).getNode(); 4537 unsigned NewOpc = 0; 4538 bool isMLA = false; 4539 bool isN0SExt = isSignExtended(N0, DAG); 4540 bool isN1SExt = isSignExtended(N1, DAG); 4541 if (isN0SExt && isN1SExt) 4542 NewOpc = ARMISD::VMULLs; 4543 else { 4544 bool isN0ZExt = isZeroExtended(N0, DAG); 4545 bool isN1ZExt = isZeroExtended(N1, DAG); 4546 if (isN0ZExt && isN1ZExt) 4547 NewOpc = ARMISD::VMULLu; 4548 else if (isN1SExt || isN1ZExt) { 4549 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4550 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4551 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4552 NewOpc = ARMISD::VMULLs; 4553 isMLA = true; 4554 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4555 NewOpc = ARMISD::VMULLu; 4556 isMLA = true; 4557 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4558 std::swap(N0, N1); 4559 NewOpc = ARMISD::VMULLu; 4560 isMLA = true; 4561 } 4562 } 4563 4564 if (!NewOpc) { 4565 if (VT == MVT::v2i64) 4566 // Fall through to expand this. It is not legal. 4567 return SDValue(); 4568 else 4569 // Other vector multiplications are legal. 4570 return Op; 4571 } 4572 } 4573 4574 // Legalize to a VMULL instruction. 4575 DebugLoc DL = Op.getDebugLoc(); 4576 SDValue Op0; 4577 SDValue Op1 = SkipExtension(N1, DAG); 4578 if (!isMLA) { 4579 Op0 = SkipExtension(N0, DAG); 4580 assert(Op0.getValueType().is64BitVector() && 4581 Op1.getValueType().is64BitVector() && 4582 "unexpected types for extended operands to VMULL"); 4583 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4584 } 4585 4586 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4587 // isel lowering to take advantage of no-stall back to back vmul + vmla. 4588 // vmull q0, d4, d6 4589 // vmlal q0, d5, d6 4590 // is faster than 4591 // vaddl q0, d4, d5 4592 // vmovl q1, d6 4593 // vmul q0, q0, q1 4594 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4595 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4596 EVT Op1VT = Op1.getValueType(); 4597 return DAG.getNode(N0->getOpcode(), DL, VT, 4598 DAG.getNode(NewOpc, DL, VT, 4599 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4600 DAG.getNode(NewOpc, DL, VT, 4601 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4602} 4603 4604static SDValue 4605LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4606 // Convert to float 4607 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4608 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4609 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4610 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4611 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4612 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4613 // Get reciprocal estimate. 4614 // float4 recip = vrecpeq_f32(yf); 4615 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4616 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4617 // Because char has a smaller range than uchar, we can actually get away 4618 // without any newton steps. This requires that we use a weird bias 4619 // of 0xb000, however (again, this has been exhaustively tested). 
4620 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4621 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4622 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4623 Y = DAG.getConstant(0xb000, MVT::i32); 4624 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4625 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4626 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4627 // Convert back to short. 4628 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4629 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4630 return X; 4631} 4632 4633static SDValue 4634LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4635 SDValue N2; 4636 // Convert to float. 4637 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4638 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4639 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4640 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4641 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4642 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4643 4644 // Use reciprocal estimate and one refinement step. 4645 // float4 recip = vrecpeq_f32(yf); 4646 // recip *= vrecpsq_f32(yf, recip); 4647 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4648 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4649 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4650 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4651 N1, N2); 4652 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4653 // Because short has a smaller range than ushort, we can actually get away 4654 // with only a single newton step. This requires that we use a weird bias 4655 // of 89, however (again, this has been exhaustively tested). 4656 // float4 result = as_float4(as_int4(xf*recip) + 89); 4657 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4658 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4659 N1 = DAG.getConstant(89, MVT::i32); 4660 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4661 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4662 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4663 // Convert back to integer and return. 
4664 // return vmovn_s32(vcvt_s32_f32(result)); 4665 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4666 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4667 return N0; 4668} 4669 4670static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4671 EVT VT = Op.getValueType(); 4672 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4673 "unexpected type for custom-lowering ISD::SDIV"); 4674 4675 DebugLoc dl = Op.getDebugLoc(); 4676 SDValue N0 = Op.getOperand(0); 4677 SDValue N1 = Op.getOperand(1); 4678 SDValue N2, N3; 4679 4680 if (VT == MVT::v8i8) { 4681 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4682 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4683 4684 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4685 DAG.getIntPtrConstant(4)); 4686 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4687 DAG.getIntPtrConstant(4)); 4688 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4689 DAG.getIntPtrConstant(0)); 4690 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4691 DAG.getIntPtrConstant(0)); 4692 4693 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4694 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4695 4696 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4697 N0 = LowerCONCAT_VECTORS(N0, DAG); 4698 4699 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4700 return N0; 4701 } 4702 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4703} 4704 4705static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4706 EVT VT = Op.getValueType(); 4707 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4708 "unexpected type for custom-lowering ISD::UDIV"); 4709 4710 DebugLoc dl = Op.getDebugLoc(); 4711 SDValue N0 = Op.getOperand(0); 4712 SDValue N1 = Op.getOperand(1); 4713 SDValue N2, N3; 4714 4715 if (VT == MVT::v8i8) { 4716 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4717 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4718 4719 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4720 DAG.getIntPtrConstant(4)); 4721 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4722 DAG.getIntPtrConstant(4)); 4723 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4724 DAG.getIntPtrConstant(0)); 4725 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4726 DAG.getIntPtrConstant(0)); 4727 4728 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4729 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4730 4731 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4732 N0 = LowerCONCAT_VECTORS(N0, DAG); 4733 4734 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4735 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4736 N0); 4737 return N0; 4738 } 4739 4740 // v4i16 sdiv ... Convert to float. 4741 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 4742 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 4743 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 4744 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 4745 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4746 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4747 4748 // Use reciprocal estimate and two refinement steps. 
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, MVT::i32);
  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:
    return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4786 LowerGlobalAddressELF(Op, DAG); 4787 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4788 case ISD::SELECT: return LowerSELECT(Op, DAG); 4789 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4790 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4791 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4792 case ISD::VASTART: return LowerVASTART(Op, DAG); 4793 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4794 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4795 case ISD::SINT_TO_FP: 4796 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4797 case ISD::FP_TO_SINT: 4798 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4799 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4800 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4801 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4802 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4803 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4804 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4805 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4806 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4807 Subtarget); 4808 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4809 case ISD::SHL: 4810 case ISD::SRL: 4811 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4812 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4813 case ISD::SRL_PARTS: 4814 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4815 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4816 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4817 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4818 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4819 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4820 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4821 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4822 case ISD::MUL: return LowerMUL(Op, DAG); 4823 case ISD::SDIV: return LowerSDIV(Op, DAG); 4824 case ISD::UDIV: return LowerUDIV(Op, DAG); 4825 } 4826 return SDValue(); 4827} 4828 4829/// ReplaceNodeResults - Replace the results of node with an illegal result 4830/// type with new values built out of custom code. 
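/// For example, an i64 SRL or SRA reaching this point is expanded via
/// Expand64BitShift into operations on the two i32 halves of the value.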
4831void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4832 SmallVectorImpl<SDValue>&Results, 4833 SelectionDAG &DAG) const { 4834 SDValue Res; 4835 switch (N->getOpcode()) { 4836 default: 4837 llvm_unreachable("Don't know how to custom expand this!"); 4838 break; 4839 case ISD::BITCAST: 4840 Res = ExpandBITCAST(N, DAG); 4841 break; 4842 case ISD::SRL: 4843 case ISD::SRA: 4844 Res = Expand64BitShift(N, DAG, Subtarget); 4845 break; 4846 } 4847 if (Res.getNode()) 4848 Results.push_back(Res); 4849} 4850 4851//===----------------------------------------------------------------------===// 4852// ARM Scheduler Hooks 4853//===----------------------------------------------------------------------===// 4854 4855MachineBasicBlock * 4856ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 4857 MachineBasicBlock *BB, 4858 unsigned Size) const { 4859 unsigned dest = MI->getOperand(0).getReg(); 4860 unsigned ptr = MI->getOperand(1).getReg(); 4861 unsigned oldval = MI->getOperand(2).getReg(); 4862 unsigned newval = MI->getOperand(3).getReg(); 4863 unsigned scratch = BB->getParent()->getRegInfo() 4864 .createVirtualRegister(ARM::GPRRegisterClass); 4865 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4866 DebugLoc dl = MI->getDebugLoc(); 4867 bool isThumb2 = Subtarget->isThumb2(); 4868 4869 unsigned ldrOpc, strOpc; 4870 switch (Size) { 4871 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4872 case 1: 4873 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4874 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4875 break; 4876 case 2: 4877 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4878 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4879 break; 4880 case 4: 4881 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4882 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4883 break; 4884 } 4885 4886 MachineFunction *MF = BB->getParent(); 4887 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4888 MachineFunction::iterator It = BB; 4889 ++It; // insert the new blocks after the current block 4890 4891 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4892 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4893 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4894 MF->insert(It, loop1MBB); 4895 MF->insert(It, loop2MBB); 4896 MF->insert(It, exitMBB); 4897 4898 // Transfer the remainder of BB and its successor edges to exitMBB. 4899 exitMBB->splice(exitMBB->begin(), BB, 4900 llvm::next(MachineBasicBlock::iterator(MI)), 4901 BB->end()); 4902 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4903 4904 // thisMBB: 4905 // ... 4906 // fallthrough --> loop1MBB 4907 BB->addSuccessor(loop1MBB); 4908 4909 // loop1MBB: 4910 // ldrex dest, [ptr] 4911 // cmp dest, oldval 4912 // bne exitMBB 4913 BB = loop1MBB; 4914 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4915 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4916 .addReg(dest).addReg(oldval)); 4917 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4918 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4919 BB->addSuccessor(loop2MBB); 4920 BB->addSuccessor(exitMBB); 4921 4922 // loop2MBB: 4923 // strex scratch, newval, [ptr] 4924 // cmp scratch, #0 4925 // bne loop1MBB 4926 BB = loop2MBB; 4927 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 4928 .addReg(ptr)); 4929 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? 
ARM::t2CMPri : ARM::CMPri)) 4930 .addReg(scratch).addImm(0)); 4931 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4932 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4933 BB->addSuccessor(loop1MBB); 4934 BB->addSuccessor(exitMBB); 4935 4936 // exitMBB: 4937 // ... 4938 BB = exitMBB; 4939 4940 MI->eraseFromParent(); // The instruction is gone now. 4941 4942 return BB; 4943} 4944 4945MachineBasicBlock * 4946ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4947 unsigned Size, unsigned BinOpcode) const { 4948 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4949 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4950 4951 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4952 MachineFunction *MF = BB->getParent(); 4953 MachineFunction::iterator It = BB; 4954 ++It; 4955 4956 unsigned dest = MI->getOperand(0).getReg(); 4957 unsigned ptr = MI->getOperand(1).getReg(); 4958 unsigned incr = MI->getOperand(2).getReg(); 4959 DebugLoc dl = MI->getDebugLoc(); 4960 4961 bool isThumb2 = Subtarget->isThumb2(); 4962 unsigned ldrOpc, strOpc; 4963 switch (Size) { 4964 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4965 case 1: 4966 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4967 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4968 break; 4969 case 2: 4970 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4971 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4972 break; 4973 case 4: 4974 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4975 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4976 break; 4977 } 4978 4979 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4980 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4981 MF->insert(It, loopMBB); 4982 MF->insert(It, exitMBB); 4983 4984 // Transfer the remainder of BB and its successor edges to exitMBB. 4985 exitMBB->splice(exitMBB->begin(), BB, 4986 llvm::next(MachineBasicBlock::iterator(MI)), 4987 BB->end()); 4988 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4989 4990 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4991 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4992 unsigned scratch2 = (!BinOpcode) ? incr : 4993 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4994 4995 // thisMBB: 4996 // ... 4997 // fallthrough --> loopMBB 4998 BB->addSuccessor(loopMBB); 4999 5000 // loopMBB: 5001 // ldrex dest, ptr 5002 // <binop> scratch2, dest, incr 5003 // strex scratch, scratch2, ptr 5004 // cmp scratch, #0 5005 // bne- loopMBB 5006 // fallthrough --> exitMBB 5007 BB = loopMBB; 5008 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 5009 if (BinOpcode) { 5010 // operand order needs to go the other way for NAND 5011 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5012 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5013 addReg(incr).addReg(dest)).addReg(0); 5014 else 5015 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5016 addReg(dest).addReg(incr)).addReg(0); 5017 } 5018 5019 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 5020 .addReg(ptr)); 5021 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5022 .addReg(scratch).addImm(0)); 5023 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5024 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5025 5026 BB->addSuccessor(loopMBB); 5027 BB->addSuccessor(exitMBB); 5028 5029 // exitMBB: 5030 // ... 
5031 BB = exitMBB; 5032 5033 MI->eraseFromParent(); // The instruction is gone now. 5034 5035 return BB; 5036} 5037 5038MachineBasicBlock * 5039ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5040 MachineBasicBlock *BB, 5041 unsigned Size, 5042 bool signExtend, 5043 ARMCC::CondCodes Cond) const { 5044 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5045 5046 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5047 MachineFunction *MF = BB->getParent(); 5048 MachineFunction::iterator It = BB; 5049 ++It; 5050 5051 unsigned dest = MI->getOperand(0).getReg(); 5052 unsigned ptr = MI->getOperand(1).getReg(); 5053 unsigned incr = MI->getOperand(2).getReg(); 5054 unsigned oldval = dest; 5055 DebugLoc dl = MI->getDebugLoc(); 5056 5057 bool isThumb2 = Subtarget->isThumb2(); 5058 unsigned ldrOpc, strOpc, extendOpc; 5059 switch (Size) { 5060 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5061 case 1: 5062 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5063 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5064 extendOpc = isThumb2 ? ARM::t2SXTBr : ARM::SXTBr; 5065 break; 5066 case 2: 5067 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5068 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5069 extendOpc = isThumb2 ? ARM::t2SXTHr : ARM::SXTHr; 5070 break; 5071 case 4: 5072 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5073 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5074 extendOpc = 0; 5075 break; 5076 } 5077 5078 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5079 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5080 MF->insert(It, loopMBB); 5081 MF->insert(It, exitMBB); 5082 5083 // Transfer the remainder of BB and its successor edges to exitMBB. 5084 exitMBB->splice(exitMBB->begin(), BB, 5085 llvm::next(MachineBasicBlock::iterator(MI)), 5086 BB->end()); 5087 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5088 5089 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 5090 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 5091 unsigned scratch2 = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 5092 5093 // thisMBB: 5094 // ... 5095 // fallthrough --> loopMBB 5096 BB->addSuccessor(loopMBB); 5097 5098 // loopMBB: 5099 // ldrex dest, ptr 5100 // (sign extend dest, if required) 5101 // cmp dest, incr 5102 // cmov.cond scratch2, dest, incr 5103 // strex scratch, scratch2, ptr 5104 // cmp scratch, #0 5105 // bne- loopMBB 5106 // fallthrough --> exitMBB 5107 BB = loopMBB; 5108 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 5109 5110 // Sign extend the value, if necessary. 5111 if (signExtend && extendOpc) { 5112 oldval = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 5113 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval).addReg(dest)); 5114 } 5115 5116 // Build compare and cmov instructions. 5117 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5118 .addReg(oldval).addReg(incr)); 5119 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5120 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5121 5122 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 5123 .addReg(ptr)); 5124 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5125 .addReg(scratch).addImm(0)); 5126 BuildMI(BB, dl, TII->get(isThumb2 ? 
                             ARM::t2Bcc : ARM::Bcc))
    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

// FIXME: This opcode table should obviously be expressed in the target
// description. We probably just need a "machine opcode" value in the pseudo
// instruction. But the ideal solution may be to simply remove the "S" version
// of the opcode altogether.
struct AddSubFlagsOpcodePair {
  unsigned PseudoOpc;
  unsigned MachineOpc;
};

static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADCSri, ARM::ADCri},
  {ARM::ADCSrr, ARM::ADCrr},
  {ARM::ADCSrs, ARM::ADCrs},
  {ARM::SBCSri, ARM::SBCri},
  {ARM::SBCSrr, ARM::SBCrr},
  {ARM::SBCSrs, ARM::SBCrs},
  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrr, ARM::RSBrr},
  {ARM::RSBSrs, ARM::RSBrs},
  {ARM::RSCSri, ARM::RSCri},
  {ARM::RSCSrs, ARM::RSCrs},
  {ARM::t2ADCSri, ARM::t2ADCri},
  {ARM::t2ADCSrr, ARM::t2ADCrr},
  {ARM::t2ADCSrs, ARM::t2ADCrs},
  {ARM::t2SBCSri, ARM::t2SBCri},
  {ARM::t2SBCSrr, ARM::t2SBCrr},
  {ARM::t2SBCSrs, ARM::t2SBCrs},
  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

// Convert an Add or Subtract with Carry and Flags to a generic opcode with
// CPSR<def> operand. e.g. ADCS (...) -> ADC (... CPSR<def>).
//
// FIXME: Somewhere we should assert that CPSR<def> is in the correct
// position to be recognized by the target description as the 'S' bit.
bool ARMTargetLowering::RemapAddSubWithFlags(MachineInstr *MI,
                                             MachineBasicBlock *BB) const {
  unsigned OldOpc = MI->getOpcode();
  unsigned NewOpc = 0;

  // This is only called for instructions that need remapping, so iterating
  // over the tiny opcode table is not costly.
5193 static const int NPairs = 5194 sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair); 5195 for (AddSubFlagsOpcodePair *Pair = &AddSubFlagsOpcodeMap[0], 5196 *End = &AddSubFlagsOpcodeMap[NPairs]; Pair != End; ++Pair) { 5197 if (OldOpc == Pair->PseudoOpc) { 5198 NewOpc = Pair->MachineOpc; 5199 break; 5200 } 5201 } 5202 if (!NewOpc) 5203 return false; 5204 5205 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5206 DebugLoc dl = MI->getDebugLoc(); 5207 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 5208 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 5209 MIB.addOperand(MI->getOperand(i)); 5210 AddDefaultPred(MIB); 5211 MIB.addReg(ARM::CPSR, RegState::Define); // S bit 5212 MI->eraseFromParent(); 5213 return true; 5214} 5215 5216MachineBasicBlock * 5217ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5218 MachineBasicBlock *BB) const { 5219 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5220 DebugLoc dl = MI->getDebugLoc(); 5221 bool isThumb2 = Subtarget->isThumb2(); 5222 switch (MI->getOpcode()) { 5223 default: { 5224 if (RemapAddSubWithFlags(MI, BB)) 5225 return BB; 5226 5227 MI->dump(); 5228 llvm_unreachable("Unexpected instr type to insert"); 5229 } 5230 case ARM::ATOMIC_LOAD_ADD_I8: 5231 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5232 case ARM::ATOMIC_LOAD_ADD_I16: 5233 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5234 case ARM::ATOMIC_LOAD_ADD_I32: 5235 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5236 5237 case ARM::ATOMIC_LOAD_AND_I8: 5238 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5239 case ARM::ATOMIC_LOAD_AND_I16: 5240 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5241 case ARM::ATOMIC_LOAD_AND_I32: 5242 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5243 5244 case ARM::ATOMIC_LOAD_OR_I8: 5245 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5246 case ARM::ATOMIC_LOAD_OR_I16: 5247 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5248 case ARM::ATOMIC_LOAD_OR_I32: 5249 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5250 5251 case ARM::ATOMIC_LOAD_XOR_I8: 5252 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5253 case ARM::ATOMIC_LOAD_XOR_I16: 5254 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5255 case ARM::ATOMIC_LOAD_XOR_I32: 5256 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5257 5258 case ARM::ATOMIC_LOAD_NAND_I8: 5259 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5260 case ARM::ATOMIC_LOAD_NAND_I16: 5261 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5262 case ARM::ATOMIC_LOAD_NAND_I32: 5263 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5264 5265 case ARM::ATOMIC_LOAD_SUB_I8: 5266 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5267 case ARM::ATOMIC_LOAD_SUB_I16: 5268 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5269 case ARM::ATOMIC_LOAD_SUB_I32: 5270 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 5271 5272 case ARM::ATOMIC_LOAD_MIN_I8: 5273 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 5274 case ARM::ATOMIC_LOAD_MIN_I16: 5275 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 5276 case ARM::ATOMIC_LOAD_MIN_I32: 5277 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 5278 5279 case ARM::ATOMIC_LOAD_MAX_I8: 5280 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 5281 case ARM::ATOMIC_LOAD_MAX_I16: 5282 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 5283 case ARM::ATOMIC_LOAD_MAX_I32: 5284 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 5285 5286 case ARM::ATOMIC_LOAD_UMIN_I8: 5287 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 5288 case ARM::ATOMIC_LOAD_UMIN_I16: 5289 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 5290 case ARM::ATOMIC_LOAD_UMIN_I32: 5291 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 5292 5293 case ARM::ATOMIC_LOAD_UMAX_I8: 5294 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 5295 case ARM::ATOMIC_LOAD_UMAX_I16: 5296 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 5297 case ARM::ATOMIC_LOAD_UMAX_I32: 5298 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 5299 5300 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 5301 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 5302 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 5303 5304 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 5305 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 5306 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 5307 5308 case ARM::tMOVCCr_pseudo: { 5309 // To "insert" a SELECT_CC instruction, we actually have to insert the 5310 // diamond control-flow pattern. The incoming instruction knows the 5311 // destination vreg to set, the condition code register to branch on, the 5312 // true/false values to select between, and a branch opcode to use. 5313 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5314 MachineFunction::iterator It = BB; 5315 ++It; 5316 5317 // thisMBB: 5318 // ... 5319 // TrueVal = ... 5320 // cmpTY ccX, r1, r2 5321 // bCC copy1MBB 5322 // fallthrough --> copy0MBB 5323 MachineBasicBlock *thisMBB = BB; 5324 MachineFunction *F = BB->getParent(); 5325 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 5326 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 5327 F->insert(It, copy0MBB); 5328 F->insert(It, sinkMBB); 5329 5330 // Transfer the remainder of BB and its successor edges to sinkMBB. 5331 sinkMBB->splice(sinkMBB->begin(), BB, 5332 llvm::next(MachineBasicBlock::iterator(MI)), 5333 BB->end()); 5334 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 5335 5336 BB->addSuccessor(copy0MBB); 5337 BB->addSuccessor(sinkMBB); 5338 5339 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 5340 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 5341 5342 // copy0MBB: 5343 // %FalseValue = ... 5344 // # fallthrough to sinkMBB 5345 BB = copy0MBB; 5346 5347 // Update machine-CFG edges 5348 BB->addSuccessor(sinkMBB); 5349 5350 // sinkMBB: 5351 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5352 // ... 
5353 BB = sinkMBB; 5354 BuildMI(*BB, BB->begin(), dl, 5355 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 5356 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5357 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5358 5359 MI->eraseFromParent(); // The pseudo instruction is gone now. 5360 return BB; 5361 } 5362 5363 case ARM::BCCi64: 5364 case ARM::BCCZi64: { 5365 // If there is an unconditional branch to the other successor, remove it. 5366 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 5367 5368 // Compare both parts that make up the double comparison separately for 5369 // equality. 5370 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 5371 5372 unsigned LHS1 = MI->getOperand(1).getReg(); 5373 unsigned LHS2 = MI->getOperand(2).getReg(); 5374 if (RHSisZero) { 5375 AddDefaultPred(BuildMI(BB, dl, 5376 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5377 .addReg(LHS1).addImm(0)); 5378 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5379 .addReg(LHS2).addImm(0) 5380 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5381 } else { 5382 unsigned RHS1 = MI->getOperand(3).getReg(); 5383 unsigned RHS2 = MI->getOperand(4).getReg(); 5384 AddDefaultPred(BuildMI(BB, dl, 5385 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5386 .addReg(LHS1).addReg(RHS1)); 5387 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5388 .addReg(LHS2).addReg(RHS2) 5389 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5390 } 5391 5392 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 5393 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 5394 if (MI->getOperand(0).getImm() == ARMCC::NE) 5395 std::swap(destMBB, exitMBB); 5396 5397 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5398 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 5399 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 5400 .addMBB(exitMBB); 5401 5402 MI->eraseFromParent(); // The pseudo instruction is gone now. 5403 return BB; 5404 } 5405 } 5406} 5407 5408//===----------------------------------------------------------------------===// 5409// ARM Optimization Hooks 5410//===----------------------------------------------------------------------===// 5411 5412static 5413SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 5414 TargetLowering::DAGCombinerInfo &DCI) { 5415 SelectionDAG &DAG = DCI.DAG; 5416 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5417 EVT VT = N->getValueType(0); 5418 unsigned Opc = N->getOpcode(); 5419 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 5420 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 5421 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 5422 ISD::CondCode CC = ISD::SETCC_INVALID; 5423 5424 if (isSlctCC) { 5425 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 5426 } else { 5427 SDValue CCOp = Slct.getOperand(0); 5428 if (CCOp.getOpcode() == ISD::SETCC) 5429 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 5430 } 5431 5432 bool DoXform = false; 5433 bool InvCC = false; 5434 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 5435 "Bad input!"); 5436 5437 if (LHS.getOpcode() == ISD::Constant && 5438 cast<ConstantSDNode>(LHS)->isNullValue()) { 5439 DoXform = true; 5440 } else if (CC != ISD::SETCC_INVALID && 5441 RHS.getOpcode() == ISD::Constant && 5442 cast<ConstantSDNode>(RHS)->isNullValue()) { 5443 std::swap(LHS, RHS); 5444 SDValue Op0 = Slct.getOperand(0); 5445 EVT OpVT = isSlctCC ? 
Op0.getValueType() : 5446 Op0.getOperand(0).getValueType(); 5447 bool isInt = OpVT.isInteger(); 5448 CC = ISD::getSetCCInverse(CC, isInt); 5449 5450 if (!TLI.isCondCodeLegal(CC, OpVT)) 5451 return SDValue(); // Inverse operator isn't legal. 5452 5453 DoXform = true; 5454 InvCC = true; 5455 } 5456 5457 if (DoXform) { 5458 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 5459 if (isSlctCC) 5460 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 5461 Slct.getOperand(0), Slct.getOperand(1), CC); 5462 SDValue CCOp = Slct.getOperand(0); 5463 if (InvCC) 5464 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 5465 CCOp.getOperand(0), CCOp.getOperand(1), CC); 5466 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 5467 CCOp, OtherOp, Result); 5468 } 5469 return SDValue(); 5470} 5471 5472/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 5473/// operands N0 and N1. This is a helper for PerformADDCombine that is 5474/// called with the default operands, and if that fails, with commuted 5475/// operands. 5476static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 5477 TargetLowering::DAGCombinerInfo &DCI) { 5478 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 5479 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 5480 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 5481 if (Result.getNode()) return Result; 5482 } 5483 return SDValue(); 5484} 5485 5486/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 5487/// 5488static SDValue PerformADDCombine(SDNode *N, 5489 TargetLowering::DAGCombinerInfo &DCI) { 5490 SDValue N0 = N->getOperand(0); 5491 SDValue N1 = N->getOperand(1); 5492 5493 // First try with the default operand order. 5494 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 5495 if (Result.getNode()) 5496 return Result; 5497 5498 // If that didn't work, try again with the operands commuted. 5499 return PerformADDCombineWithOperands(N, N1, N0, DCI); 5500} 5501 5502/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 5503/// 5504static SDValue PerformSUBCombine(SDNode *N, 5505 TargetLowering::DAGCombinerInfo &DCI) { 5506 SDValue N0 = N->getOperand(0); 5507 SDValue N1 = N->getOperand(1); 5508 5509 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 5510 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 5511 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 5512 if (Result.getNode()) return Result; 5513 } 5514 5515 return SDValue(); 5516} 5517 5518/// PerformVMULCombine 5519/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 5520/// special multiplier accumulator forwarding. 
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    // N0 is not an add/sub; check N1 and, if it matches, swap the operands so
    // the add/sub ends up in N0.
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  EVT VT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  uint64_t MulAmt = C->getZExtValue();
  unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  DebugLoc DL = N->getDebugLoc();

  SDValue Res;
  MulAmt >>= ShiftAmt;
  if (isPowerOf2_32(MulAmt - 1)) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    Res = DAG.getNode(ISD::ADD, DL, VT,
                      V, DAG.getNode(ISD::SHL, DL, VT,
                                     V, DAG.getConstant(Log2_32(MulAmt-1),
                                                        MVT::i32)));
  } else if (isPowerOf2_32(MulAmt + 1)) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    Res = DAG.getNode(ISD::SUB, DL, VT,
                      DAG.getNode(ISD::SHL, DL, VT,
                                  V, DAG.getConstant(Log2_32(MulAmt+1),
                                                     MVT::i32)),
                      V);
  } else
    return SDValue();

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT, Res,
                      DAG.getConstant(ShiftAmt, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
5604 DCI.CombineTo(N, Res, false); 5605 return SDValue(); 5606} 5607 5608static SDValue PerformANDCombine(SDNode *N, 5609 TargetLowering::DAGCombinerInfo &DCI) { 5610 5611 // Attempt to use immediate-form VBIC 5612 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5613 DebugLoc dl = N->getDebugLoc(); 5614 EVT VT = N->getValueType(0); 5615 SelectionDAG &DAG = DCI.DAG; 5616 5617 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 5618 return SDValue(); 5619 5620 APInt SplatBits, SplatUndef; 5621 unsigned SplatBitSize; 5622 bool HasAnyUndefs; 5623 if (BVN && 5624 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5625 if (SplatBitSize <= 64) { 5626 EVT VbicVT; 5627 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 5628 SplatUndef.getZExtValue(), SplatBitSize, 5629 DAG, VbicVT, VT.is128BitVector(), 5630 OtherModImm); 5631 if (Val.getNode()) { 5632 SDValue Input = 5633 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 5634 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 5635 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 5636 } 5637 } 5638 } 5639 5640 return SDValue(); 5641} 5642 5643/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 5644static SDValue PerformORCombine(SDNode *N, 5645 TargetLowering::DAGCombinerInfo &DCI, 5646 const ARMSubtarget *Subtarget) { 5647 // Attempt to use immediate-form VORR 5648 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5649 DebugLoc dl = N->getDebugLoc(); 5650 EVT VT = N->getValueType(0); 5651 SelectionDAG &DAG = DCI.DAG; 5652 5653 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 5654 return SDValue(); 5655 5656 APInt SplatBits, SplatUndef; 5657 unsigned SplatBitSize; 5658 bool HasAnyUndefs; 5659 if (BVN && Subtarget->hasNEON() && 5660 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5661 if (SplatBitSize <= 64) { 5662 EVT VorrVT; 5663 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 5664 SplatUndef.getZExtValue(), SplatBitSize, 5665 DAG, VorrVT, VT.is128BitVector(), 5666 OtherModImm); 5667 if (Val.getNode()) { 5668 SDValue Input = 5669 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 5670 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 5671 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 5672 } 5673 } 5674 } 5675 5676 SDValue N0 = N->getOperand(0); 5677 if (N0.getOpcode() != ISD::AND) 5678 return SDValue(); 5679 SDValue N1 = N->getOperand(1); 5680 5681 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 5682 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 5683 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 5684 APInt SplatUndef; 5685 unsigned SplatBitSize; 5686 bool HasAnyUndefs; 5687 5688 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 5689 APInt SplatBits0; 5690 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 5691 HasAnyUndefs) && !HasAnyUndefs) { 5692 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 5693 APInt SplatBits1; 5694 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 5695 HasAnyUndefs) && !HasAnyUndefs && 5696 SplatBits0 == ~SplatBits1) { 5697 // Canonicalize the vector type to make instruction selection simpler. 5698 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 5699 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 5700 N0->getOperand(1), N0->getOperand(0), 5701 N1->getOperand(0)); 5702 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 5703 } 5704 } 5705 } 5706 5707 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 5708 // reasonable. 5709 5710 // BFI is only available on V6T2+ 5711 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 5712 return SDValue(); 5713 5714 DebugLoc DL = N->getDebugLoc(); 5715 // 1) or (and A, mask), val => ARMbfi A, val, mask 5716 // iff (val & mask) == val 5717 // 5718 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5719 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 5720 // && mask == ~mask2 5721 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 5722 // && ~mask == mask2 5723 // (i.e., copy a bitfield value into another bitfield of the same width) 5724 5725 if (VT != MVT::i32) 5726 return SDValue(); 5727 5728 SDValue N00 = N0.getOperand(0); 5729 5730 // The value and the mask need to be constants so we can verify this is 5731 // actually a bitfield set. If the mask is 0xffff, we can do better 5732 // via a movt instruction, so don't use BFI in that case. 5733 SDValue MaskOp = N0.getOperand(1); 5734 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 5735 if (!MaskC) 5736 return SDValue(); 5737 unsigned Mask = MaskC->getZExtValue(); 5738 if (Mask == 0xffff) 5739 return SDValue(); 5740 SDValue Res; 5741 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 5742 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5743 if (N1C) { 5744 unsigned Val = N1C->getZExtValue(); 5745 if ((Val & ~Mask) != Val) 5746 return SDValue(); 5747 5748 if (ARM::isBitFieldInvertedMask(Mask)) { 5749 Val >>= CountTrailingZeros_32(~Mask); 5750 5751 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 5752 DAG.getConstant(Val, MVT::i32), 5753 DAG.getConstant(Mask, MVT::i32)); 5754 5755 // Do not add new nodes to DAG combiner worklist. 5756 DCI.CombineTo(N, Res, false); 5757 return SDValue(); 5758 } 5759 } else if (N1.getOpcode() == ISD::AND) { 5760 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5761 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5762 if (!N11C) 5763 return SDValue(); 5764 unsigned Mask2 = N11C->getZExtValue(); 5765 5766 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 5767 // as is to match. 5768 if (ARM::isBitFieldInvertedMask(Mask) && 5769 (Mask == ~Mask2)) { 5770 // The pack halfword instruction works better for masks that fit it, 5771 // so use that when it's available. 5772 if (Subtarget->hasT2ExtractPack() && 5773 (Mask == 0xffff || Mask == 0xffff0000)) 5774 return SDValue(); 5775 // 2a 5776 unsigned amt = CountTrailingZeros_32(Mask2); 5777 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 5778 DAG.getConstant(amt, MVT::i32)); 5779 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 5780 DAG.getConstant(Mask, MVT::i32)); 5781 // Do not add new nodes to DAG combiner worklist. 5782 DCI.CombineTo(N, Res, false); 5783 return SDValue(); 5784 } else if (ARM::isBitFieldInvertedMask(~Mask) && 5785 (~Mask == Mask2)) { 5786 // The pack halfword instruction works better for masks that fit it, 5787 // so use that when it's available. 
5788 if (Subtarget->hasT2ExtractPack() && 5789 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 5790 return SDValue(); 5791 // 2b 5792 unsigned lsb = CountTrailingZeros_32(Mask); 5793 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 5794 DAG.getConstant(lsb, MVT::i32)); 5795 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 5796 DAG.getConstant(Mask2, MVT::i32)); 5797 // Do not add new nodes to DAG combiner worklist. 5798 DCI.CombineTo(N, Res, false); 5799 return SDValue(); 5800 } 5801 } 5802 5803 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 5804 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 5805 ARM::isBitFieldInvertedMask(~Mask)) { 5806 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 5807 // where lsb(mask) == #shamt and masked bits of B are known zero. 5808 SDValue ShAmt = N00.getOperand(1); 5809 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 5810 unsigned LSB = CountTrailingZeros_32(Mask); 5811 if (ShAmtC != LSB) 5812 return SDValue(); 5813 5814 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 5815 DAG.getConstant(~Mask, MVT::i32)); 5816 5817 // Do not add new nodes to DAG combiner worklist. 5818 DCI.CombineTo(N, Res, false); 5819 } 5820 5821 return SDValue(); 5822} 5823 5824/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 5825/// C1 & C2 == C1. 5826static SDValue PerformBFICombine(SDNode *N, 5827 TargetLowering::DAGCombinerInfo &DCI) { 5828 SDValue N1 = N->getOperand(1); 5829 if (N1.getOpcode() == ISD::AND) { 5830 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5831 if (!N11C) 5832 return SDValue(); 5833 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 5834 unsigned Mask2 = N11C->getZExtValue(); 5835 if ((Mask & Mask2) == Mask2) 5836 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 5837 N->getOperand(0), N1.getOperand(0), 5838 N->getOperand(2)); 5839 } 5840 return SDValue(); 5841} 5842 5843/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 5844/// ARMISD::VMOVRRD. 5845static SDValue PerformVMOVRRDCombine(SDNode *N, 5846 TargetLowering::DAGCombinerInfo &DCI) { 5847 // vmovrrd(vmovdrr x, y) -> x,y 5848 SDValue InDouble = N->getOperand(0); 5849 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 5850 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 5851 5852 // vmovrrd(load f64) -> (load i32), (load i32) 5853 SDNode *InNode = InDouble.getNode(); 5854 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 5855 InNode->getValueType(0) == MVT::f64 && 5856 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 5857 !cast<LoadSDNode>(InNode)->isVolatile()) { 5858 // TODO: Should this be done for non-FrameIndex operands? 
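    // For example, (vmovrrd (f64 load [fi])) becomes two i32 loads from
    // [fi] and [fi+4], feeding the two GPR results directly and avoiding
    // the round trip through a D register.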
5859 LoadSDNode *LD = cast<LoadSDNode>(InNode); 5860 5861 SelectionDAG &DAG = DCI.DAG; 5862 DebugLoc DL = LD->getDebugLoc(); 5863 SDValue BasePtr = LD->getBasePtr(); 5864 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 5865 LD->getPointerInfo(), LD->isVolatile(), 5866 LD->isNonTemporal(), LD->getAlignment()); 5867 5868 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 5869 DAG.getConstant(4, MVT::i32)); 5870 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 5871 LD->getPointerInfo(), LD->isVolatile(), 5872 LD->isNonTemporal(), 5873 std::min(4U, LD->getAlignment() / 2)); 5874 5875 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 5876 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 5877 DCI.RemoveFromWorklist(LD); 5878 DAG.DeleteNode(LD); 5879 return Result; 5880 } 5881 5882 return SDValue(); 5883} 5884 5885/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 5886/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 5887static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 5888 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 5889 SDValue Op0 = N->getOperand(0); 5890 SDValue Op1 = N->getOperand(1); 5891 if (Op0.getOpcode() == ISD::BITCAST) 5892 Op0 = Op0.getOperand(0); 5893 if (Op1.getOpcode() == ISD::BITCAST) 5894 Op1 = Op1.getOperand(0); 5895 if (Op0.getOpcode() == ARMISD::VMOVRRD && 5896 Op0.getNode() == Op1.getNode() && 5897 Op0.getResNo() == 0 && Op1.getResNo() == 1) 5898 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 5899 N->getValueType(0), Op0.getOperand(0)); 5900 return SDValue(); 5901} 5902 5903/// PerformSTORECombine - Target-specific dag combine xforms for 5904/// ISD::STORE. 5905static SDValue PerformSTORECombine(SDNode *N, 5906 TargetLowering::DAGCombinerInfo &DCI) { 5907 // Bitcast an i64 store extracted from a vector to f64. 5908 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
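// For example, store (i64 extract_vector_elt (v2i64 X), n) is rewritten as
// store (i64 bitcast (f64 extract_vector_elt (v2f64 bitcast X), n)), so the
// element can be stored directly from a D register.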
5909 StoreSDNode *St = cast<StoreSDNode>(N); 5910 SDValue StVal = St->getValue(); 5911 if (!ISD::isNormalStore(St) || St->isVolatile()) 5912 return SDValue(); 5913 5914 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 5915 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 5916 SelectionDAG &DAG = DCI.DAG; 5917 DebugLoc DL = St->getDebugLoc(); 5918 SDValue BasePtr = St->getBasePtr(); 5919 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 5920 StVal.getNode()->getOperand(0), BasePtr, 5921 St->getPointerInfo(), St->isVolatile(), 5922 St->isNonTemporal(), St->getAlignment()); 5923 5924 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 5925 DAG.getConstant(4, MVT::i32)); 5926 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 5927 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 5928 St->isNonTemporal(), 5929 std::min(4U, St->getAlignment() / 2)); 5930 } 5931 5932 if (StVal.getValueType() != MVT::i64 || 5933 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 5934 return SDValue(); 5935 5936 SelectionDAG &DAG = DCI.DAG; 5937 DebugLoc dl = StVal.getDebugLoc(); 5938 SDValue IntVec = StVal.getOperand(0); 5939 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5940 IntVec.getValueType().getVectorNumElements()); 5941 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 5942 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 5943 Vec, StVal.getOperand(1)); 5944 dl = N->getDebugLoc(); 5945 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 5946 // Make the DAGCombiner fold the bitcasts. 5947 DCI.AddToWorklist(Vec.getNode()); 5948 DCI.AddToWorklist(ExtElt.getNode()); 5949 DCI.AddToWorklist(V.getNode()); 5950 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 5951 St->getPointerInfo(), St->isVolatile(), 5952 St->isNonTemporal(), St->getAlignment(), 5953 St->getTBAAInfo()); 5954} 5955 5956/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 5957/// are normal, non-volatile loads. If so, it is profitable to bitcast an 5958/// i64 vector to have f64 elements, since the value can then be loaded 5959/// directly into a VFP register. 5960static bool hasNormalLoadOperand(SDNode *N) { 5961 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 5962 for (unsigned i = 0; i < NumElts; ++i) { 5963 SDNode *Elt = N->getOperand(i).getNode(); 5964 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 5965 return true; 5966 } 5967 return false; 5968} 5969 5970/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 5971/// ISD::BUILD_VECTOR. 5972static SDValue PerformBUILD_VECTORCombine(SDNode *N, 5973 TargetLowering::DAGCombinerInfo &DCI){ 5974 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 5975 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 5976 // into a pair of GPRs, which is fine when the value is used as a scalar, 5977 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 5978 SelectionDAG &DAG = DCI.DAG; 5979 if (N->getNumOperands() == 2) { 5980 SDValue RV = PerformVMOVDRRCombine(N, DAG); 5981 if (RV.getNode()) 5982 return RV; 5983 } 5984 5985 // Load i64 elements as f64 values so that type legalization does not split 5986 // them up into i32 values. 
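// For example, (v2i64 build_vector (i64 load), (i64 load)) becomes
// (v2i64 bitcast (v2f64 build_vector (f64 bitcast (i64 load)),
//                                     (f64 bitcast (i64 load)))),
// so that the DAG combiner can fold each bitcast-of-load into an f64 load.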
5987 EVT VT = N->getValueType(0); 5988 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 5989 return SDValue(); 5990 DebugLoc dl = N->getDebugLoc(); 5991 SmallVector<SDValue, 8> Ops; 5992 unsigned NumElts = VT.getVectorNumElements(); 5993 for (unsigned i = 0; i < NumElts; ++i) { 5994 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 5995 Ops.push_back(V); 5996 // Make the DAGCombiner fold the bitcast. 5997 DCI.AddToWorklist(V.getNode()); 5998 } 5999 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 6000 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 6001 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 6002} 6003 6004/// PerformInsertEltCombine - Target-specific dag combine xforms for 6005/// ISD::INSERT_VECTOR_ELT. 6006static SDValue PerformInsertEltCombine(SDNode *N, 6007 TargetLowering::DAGCombinerInfo &DCI) { 6008 // Bitcast an i64 load inserted into a vector to f64. 6009 // Otherwise, the i64 value will be legalized to a pair of i32 values. 6010 EVT VT = N->getValueType(0); 6011 SDNode *Elt = N->getOperand(1).getNode(); 6012 if (VT.getVectorElementType() != MVT::i64 || 6013 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 6014 return SDValue(); 6015 6016 SelectionDAG &DAG = DCI.DAG; 6017 DebugLoc dl = N->getDebugLoc(); 6018 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6019 VT.getVectorNumElements()); 6020 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 6021 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 6022 // Make the DAGCombiner fold the bitcasts. 6023 DCI.AddToWorklist(Vec.getNode()); 6024 DCI.AddToWorklist(V.getNode()); 6025 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 6026 Vec, V, N->getOperand(2)); 6027 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 6028} 6029 6030/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 6031/// ISD::VECTOR_SHUFFLE. 6032static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 6033 // The LLVM shufflevector instruction does not require the shuffle mask 6034 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 6035 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 6036 // operands do not match the mask length, they are extended by concatenating 6037 // them with undef vectors. That is probably the right thing for other 6038 // targets, but for NEON it is better to concatenate two double-register 6039 // size vector operands into a single quad-register size vector. Do that 6040 // transformation here: 6041 // shuffle(concat(v1, undef), concat(v2, undef)) -> 6042 // shuffle(concat(v1, v2), undef) 6043 SDValue Op0 = N->getOperand(0); 6044 SDValue Op1 = N->getOperand(1); 6045 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 6046 Op1.getOpcode() != ISD::CONCAT_VECTORS || 6047 Op0.getNumOperands() != 2 || 6048 Op1.getNumOperands() != 2) 6049 return SDValue(); 6050 SDValue Concat0Op1 = Op0.getOperand(1); 6051 SDValue Concat1Op1 = Op1.getOperand(1); 6052 if (Concat0Op1.getOpcode() != ISD::UNDEF || 6053 Concat1Op1.getOpcode() != ISD::UNDEF) 6054 return SDValue(); 6055 // Skip the transformation if any of the types are illegal. 
6056 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6057 EVT VT = N->getValueType(0); 6058 if (!TLI.isTypeLegal(VT) || 6059 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 6060 !TLI.isTypeLegal(Concat1Op1.getValueType())) 6061 return SDValue(); 6062 6063 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 6064 Op0.getOperand(0), Op1.getOperand(0)); 6065 // Translate the shuffle mask. 6066 SmallVector<int, 16> NewMask; 6067 unsigned NumElts = VT.getVectorNumElements(); 6068 unsigned HalfElts = NumElts/2; 6069 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 6070 for (unsigned n = 0; n < NumElts; ++n) { 6071 int MaskElt = SVN->getMaskElt(n); 6072 int NewElt = -1; 6073 if (MaskElt < (int)HalfElts) 6074 NewElt = MaskElt; 6075 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 6076 NewElt = HalfElts + MaskElt - NumElts; 6077 NewMask.push_back(NewElt); 6078 } 6079 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 6080 DAG.getUNDEF(VT), NewMask.data()); 6081} 6082 6083/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 6084/// NEON load/store intrinsics to merge base address updates. 6085static SDValue CombineBaseUpdate(SDNode *N, 6086 TargetLowering::DAGCombinerInfo &DCI) { 6087 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6088 return SDValue(); 6089 6090 SelectionDAG &DAG = DCI.DAG; 6091 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 6092 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 6093 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 6094 SDValue Addr = N->getOperand(AddrOpIdx); 6095 6096 // Search for a use of the address operand that is an increment. 6097 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 6098 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 6099 SDNode *User = *UI; 6100 if (User->getOpcode() != ISD::ADD || 6101 UI.getUse().getResNo() != Addr.getResNo()) 6102 continue; 6103 6104 // Check that the add is independent of the load/store. Otherwise, folding 6105 // it would create a cycle. 6106 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 6107 continue; 6108 6109 // Find the new opcode for the updating load/store. 
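// For example, a vld1 that loads 16 bytes and whose address is then
// incremented by 16 can be replaced with a single VLD1_UPD node that also
// produces the post-incremented address.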
6110 bool isLoad = true; 6111 bool isLaneOp = false; 6112 unsigned NewOpc = 0; 6113 unsigned NumVecs = 0; 6114 if (isIntrinsic) { 6115 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 6116 switch (IntNo) { 6117 default: assert(0 && "unexpected intrinsic for Neon base update"); 6118 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 6119 NumVecs = 1; break; 6120 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 6121 NumVecs = 2; break; 6122 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 6123 NumVecs = 3; break; 6124 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 6125 NumVecs = 4; break; 6126 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 6127 NumVecs = 2; isLaneOp = true; break; 6128 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 6129 NumVecs = 3; isLaneOp = true; break; 6130 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 6131 NumVecs = 4; isLaneOp = true; break; 6132 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 6133 NumVecs = 1; isLoad = false; break; 6134 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 6135 NumVecs = 2; isLoad = false; break; 6136 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 6137 NumVecs = 3; isLoad = false; break; 6138 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 6139 NumVecs = 4; isLoad = false; break; 6140 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 6141 NumVecs = 2; isLoad = false; isLaneOp = true; break; 6142 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 6143 NumVecs = 3; isLoad = false; isLaneOp = true; break; 6144 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 6145 NumVecs = 4; isLoad = false; isLaneOp = true; break; 6146 } 6147 } else { 6148 isLaneOp = true; 6149 switch (N->getOpcode()) { 6150 default: assert(0 && "unexpected opcode for Neon base update"); 6151 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 6152 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 6153 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 6154 } 6155 } 6156 6157 // Find the size of memory referenced by the load/store. 6158 EVT VecTy; 6159 if (isLoad) 6160 VecTy = N->getValueType(0); 6161 else 6162 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 6163 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 6164 if (isLaneOp) 6165 NumBytes /= VecTy.getVectorNumElements(); 6166 6167 // If the increment is a constant, it must match the memory ref size. 6168 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 6169 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 6170 uint64_t IncVal = CInc->getZExtValue(); 6171 if (IncVal != NumBytes) 6172 continue; 6173 } else if (NumBytes >= 3 * 16) { 6174 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 6175 // separate instructions that make it harder to use a non-constant update. 6176 continue; 6177 } 6178 6179 // Create the new updating load/store node. 6180 EVT Tys[6]; 6181 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 6182 unsigned n; 6183 for (n = 0; n < NumResultVecs; ++n) 6184 Tys[n] = VecTy; 6185 Tys[n++] = MVT::i32; 6186 Tys[n] = MVT::Other; 6187 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 6188 SmallVector<SDValue, 8> Ops; 6189 Ops.push_back(N->getOperand(0)); // incoming chain 6190 Ops.push_back(N->getOperand(AddrOpIdx)); 6191 Ops.push_back(Inc); 6192 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 6193 Ops.push_back(N->getOperand(i)); 6194 } 6195 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 6196 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 6197 Ops.data(), Ops.size(), 6198 MemInt->getMemoryVT(), 6199 MemInt->getMemOperand()); 6200 6201 // Update the uses. 6202 std::vector<SDValue> NewResults; 6203 for (unsigned i = 0; i < NumResultVecs; ++i) { 6204 NewResults.push_back(SDValue(UpdN.getNode(), i)); 6205 } 6206 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 6207 DCI.CombineTo(N, NewResults); 6208 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 6209 6210 break; 6211 } 6212 return SDValue(); 6213} 6214 6215/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 6216/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 6217/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 6218/// return true. 6219static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 6220 SelectionDAG &DAG = DCI.DAG; 6221 EVT VT = N->getValueType(0); 6222 // vldN-dup instructions only support 64-bit vectors for N > 1. 6223 if (!VT.is64BitVector()) 6224 return false; 6225 6226 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 6227 SDNode *VLD = N->getOperand(0).getNode(); 6228 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 6229 return false; 6230 unsigned NumVecs = 0; 6231 unsigned NewOpc = 0; 6232 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 6233 if (IntNo == Intrinsic::arm_neon_vld2lane) { 6234 NumVecs = 2; 6235 NewOpc = ARMISD::VLD2DUP; 6236 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 6237 NumVecs = 3; 6238 NewOpc = ARMISD::VLD3DUP; 6239 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 6240 NumVecs = 4; 6241 NewOpc = ARMISD::VLD4DUP; 6242 } else { 6243 return false; 6244 } 6245 6246 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 6247 // numbers match the load. 6248 unsigned VLDLaneNo = 6249 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 6250 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6251 UI != UE; ++UI) { 6252 // Ignore uses of the chain result. 6253 if (UI.getUse().getResNo() == NumVecs) 6254 continue; 6255 SDNode *User = *UI; 6256 if (User->getOpcode() != ARMISD::VDUPLANE || 6257 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 6258 return false; 6259 } 6260 6261 // Create the vldN-dup node. 6262 EVT Tys[5]; 6263 unsigned n; 6264 for (n = 0; n < NumVecs; ++n) 6265 Tys[n] = VT; 6266 Tys[n] = MVT::Other; 6267 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 6268 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 6269 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 6270 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 6271 Ops, 2, VLDMemInt->getMemoryVT(), 6272 VLDMemInt->getMemOperand()); 6273 6274 // Update the uses. 
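// The new node's results (the loaded vectors, if any, and the chain) replace
// those of the original node, and the ADD that computed the updated address
// is rewired to the new node's address result (result number NumResultVecs).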
6275 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6276 UI != UE; ++UI) { 6277 unsigned ResNo = UI.getUse().getResNo(); 6278 // Ignore uses of the chain result. 6279 if (ResNo == NumVecs) 6280 continue; 6281 SDNode *User = *UI; 6282 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 6283 } 6284 6285 // Now the vldN-lane intrinsic is dead except for its chain result. 6286 // Update uses of the chain. 6287 std::vector<SDValue> VLDDupResults; 6288 for (unsigned n = 0; n < NumVecs; ++n) 6289 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 6290 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 6291 DCI.CombineTo(VLD, VLDDupResults); 6292 6293 return true; 6294} 6295 6296/// PerformVDUPLANECombine - Target-specific dag combine xforms for 6297/// ARMISD::VDUPLANE. 6298static SDValue PerformVDUPLANECombine(SDNode *N, 6299 TargetLowering::DAGCombinerInfo &DCI) { 6300 SDValue Op = N->getOperand(0); 6301 6302 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 6303 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 6304 if (CombineVLDDUP(N, DCI)) 6305 return SDValue(N, 0); 6306 6307 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 6308 // redundant. Ignore bit_converts for now; element sizes are checked below. 6309 while (Op.getOpcode() == ISD::BITCAST) 6310 Op = Op.getOperand(0); 6311 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 6312 return SDValue(); 6313 6314 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 6315 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 6316 // The canonical VMOV for a zero vector uses a 32-bit element size. 6317 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6318 unsigned EltBits; 6319 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 6320 EltSize = 8; 6321 EVT VT = N->getValueType(0); 6322 if (EltSize > VT.getVectorElementType().getSizeInBits()) 6323 return SDValue(); 6324 6325 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 6326} 6327 6328/// getVShiftImm - Check if this is a valid build_vector for the immediate 6329/// operand of a vector shift operation, where all the elements of the 6330/// build_vector must have the same constant integer value. 6331static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 6332 // Ignore bit_converts. 6333 while (Op.getOpcode() == ISD::BITCAST) 6334 Op = Op.getOperand(0); 6335 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6336 APInt SplatBits, SplatUndef; 6337 unsigned SplatBitSize; 6338 bool HasAnyUndefs; 6339 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 6340 HasAnyUndefs, ElementBits) || 6341 SplatBitSize > ElementBits) 6342 return false; 6343 Cnt = SplatBits.getSExtValue(); 6344 return true; 6345} 6346 6347/// isVShiftLImm - Check if this is a valid build_vector for the immediate 6348/// operand of a vector shift left operation. That value must be in the range: 6349/// 0 <= Value < ElementBits for a left shift; or 6350/// 0 <= Value <= ElementBits for a long left shift. 6351static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 6352 assert(VT.isVector() && "vector shift count is not a vector type"); 6353 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6354 if (! getVShiftImm(Op, ElementBits, Cnt)) 6355 return false; 6356 return (Cnt >= 0 && (isLong ? 
Cnt-1 : Cnt) < ElementBits); 6357} 6358 6359/// isVShiftRImm - Check if this is a valid build_vector for the immediate 6360/// operand of a vector shift right operation. For a shift opcode, the value 6361/// is positive, but for an intrinsic the value count must be negative. The 6362/// absolute value must be in the range: 6363/// 1 <= |Value| <= ElementBits for a right shift; or 6364/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 6365static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 6366 int64_t &Cnt) { 6367 assert(VT.isVector() && "vector shift count is not a vector type"); 6368 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6369 if (! getVShiftImm(Op, ElementBits, Cnt)) 6370 return false; 6371 if (isIntrinsic) 6372 Cnt = -Cnt; 6373 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 6374} 6375 6376/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 6377static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 6378 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 6379 switch (IntNo) { 6380 default: 6381 // Don't do anything for most intrinsics. 6382 break; 6383 6384 // Vector shifts: check for immediate versions and lower them. 6385 // Note: This is done during DAG combining instead of DAG legalizing because 6386 // the build_vectors for 64-bit vector element shift counts are generally 6387 // not legal, and it is hard to see their values after they get legalized to 6388 // loads from a constant pool. 6389 case Intrinsic::arm_neon_vshifts: 6390 case Intrinsic::arm_neon_vshiftu: 6391 case Intrinsic::arm_neon_vshiftls: 6392 case Intrinsic::arm_neon_vshiftlu: 6393 case Intrinsic::arm_neon_vshiftn: 6394 case Intrinsic::arm_neon_vrshifts: 6395 case Intrinsic::arm_neon_vrshiftu: 6396 case Intrinsic::arm_neon_vrshiftn: 6397 case Intrinsic::arm_neon_vqshifts: 6398 case Intrinsic::arm_neon_vqshiftu: 6399 case Intrinsic::arm_neon_vqshiftsu: 6400 case Intrinsic::arm_neon_vqshiftns: 6401 case Intrinsic::arm_neon_vqshiftnu: 6402 case Intrinsic::arm_neon_vqshiftnsu: 6403 case Intrinsic::arm_neon_vqrshiftns: 6404 case Intrinsic::arm_neon_vqrshiftnu: 6405 case Intrinsic::arm_neon_vqrshiftnsu: { 6406 EVT VT = N->getOperand(1).getValueType(); 6407 int64_t Cnt; 6408 unsigned VShiftOpc = 0; 6409 6410 switch (IntNo) { 6411 case Intrinsic::arm_neon_vshifts: 6412 case Intrinsic::arm_neon_vshiftu: 6413 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 6414 VShiftOpc = ARMISD::VSHL; 6415 break; 6416 } 6417 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 6418 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
6419 ARMISD::VSHRs : ARMISD::VSHRu); 6420 break; 6421 } 6422 return SDValue(); 6423 6424 case Intrinsic::arm_neon_vshiftls: 6425 case Intrinsic::arm_neon_vshiftlu: 6426 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 6427 break; 6428 llvm_unreachable("invalid shift count for vshll intrinsic"); 6429 6430 case Intrinsic::arm_neon_vrshifts: 6431 case Intrinsic::arm_neon_vrshiftu: 6432 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 6433 break; 6434 return SDValue(); 6435 6436 case Intrinsic::arm_neon_vqshifts: 6437 case Intrinsic::arm_neon_vqshiftu: 6438 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6439 break; 6440 return SDValue(); 6441 6442 case Intrinsic::arm_neon_vqshiftsu: 6443 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6444 break; 6445 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 6446 6447 case Intrinsic::arm_neon_vshiftn: 6448 case Intrinsic::arm_neon_vrshiftn: 6449 case Intrinsic::arm_neon_vqshiftns: 6450 case Intrinsic::arm_neon_vqshiftnu: 6451 case Intrinsic::arm_neon_vqshiftnsu: 6452 case Intrinsic::arm_neon_vqrshiftns: 6453 case Intrinsic::arm_neon_vqrshiftnu: 6454 case Intrinsic::arm_neon_vqrshiftnsu: 6455 // Narrowing shifts require an immediate right shift. 6456 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 6457 break; 6458 llvm_unreachable("invalid shift count for narrowing vector shift " 6459 "intrinsic"); 6460 6461 default: 6462 llvm_unreachable("unhandled vector shift"); 6463 } 6464 6465 switch (IntNo) { 6466 case Intrinsic::arm_neon_vshifts: 6467 case Intrinsic::arm_neon_vshiftu: 6468 // Opcode already set above. 6469 break; 6470 case Intrinsic::arm_neon_vshiftls: 6471 case Intrinsic::arm_neon_vshiftlu: 6472 if (Cnt == VT.getVectorElementType().getSizeInBits()) 6473 VShiftOpc = ARMISD::VSHLLi; 6474 else 6475 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
6476 ARMISD::VSHLLs : ARMISD::VSHLLu); 6477 break; 6478 case Intrinsic::arm_neon_vshiftn: 6479 VShiftOpc = ARMISD::VSHRN; break; 6480 case Intrinsic::arm_neon_vrshifts: 6481 VShiftOpc = ARMISD::VRSHRs; break; 6482 case Intrinsic::arm_neon_vrshiftu: 6483 VShiftOpc = ARMISD::VRSHRu; break; 6484 case Intrinsic::arm_neon_vrshiftn: 6485 VShiftOpc = ARMISD::VRSHRN; break; 6486 case Intrinsic::arm_neon_vqshifts: 6487 VShiftOpc = ARMISD::VQSHLs; break; 6488 case Intrinsic::arm_neon_vqshiftu: 6489 VShiftOpc = ARMISD::VQSHLu; break; 6490 case Intrinsic::arm_neon_vqshiftsu: 6491 VShiftOpc = ARMISD::VQSHLsu; break; 6492 case Intrinsic::arm_neon_vqshiftns: 6493 VShiftOpc = ARMISD::VQSHRNs; break; 6494 case Intrinsic::arm_neon_vqshiftnu: 6495 VShiftOpc = ARMISD::VQSHRNu; break; 6496 case Intrinsic::arm_neon_vqshiftnsu: 6497 VShiftOpc = ARMISD::VQSHRNsu; break; 6498 case Intrinsic::arm_neon_vqrshiftns: 6499 VShiftOpc = ARMISD::VQRSHRNs; break; 6500 case Intrinsic::arm_neon_vqrshiftnu: 6501 VShiftOpc = ARMISD::VQRSHRNu; break; 6502 case Intrinsic::arm_neon_vqrshiftnsu: 6503 VShiftOpc = ARMISD::VQRSHRNsu; break; 6504 } 6505 6506 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6507 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 6508 } 6509 6510 case Intrinsic::arm_neon_vshiftins: { 6511 EVT VT = N->getOperand(1).getValueType(); 6512 int64_t Cnt; 6513 unsigned VShiftOpc = 0; 6514 6515 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 6516 VShiftOpc = ARMISD::VSLI; 6517 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 6518 VShiftOpc = ARMISD::VSRI; 6519 else { 6520 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 6521 } 6522 6523 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6524 N->getOperand(1), N->getOperand(2), 6525 DAG.getConstant(Cnt, MVT::i32)); 6526 } 6527 6528 case Intrinsic::arm_neon_vqrshifts: 6529 case Intrinsic::arm_neon_vqrshiftu: 6530 // No immediate versions of these to check for. 6531 break; 6532 } 6533 6534 return SDValue(); 6535} 6536 6537/// PerformShiftCombine - Checks for immediate versions of vector shifts and 6538/// lowers them. As with the vector shift intrinsics, this is done during DAG 6539/// combining instead of DAG legalizing because the build_vectors for 64-bit 6540/// vector element shift counts are generally not legal, and it is hard to see 6541/// their values after they get legalized to loads from a constant pool. 6542static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 6543 const ARMSubtarget *ST) { 6544 EVT VT = N->getValueType(0); 6545 6546 // Nothing to be done for scalar shifts. 6547 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6548 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 6549 return SDValue(); 6550 6551 assert(ST->hasNEON() && "unexpected vector shift"); 6552 int64_t Cnt; 6553 6554 switch (N->getOpcode()) { 6555 default: llvm_unreachable("unexpected shift opcode"); 6556 6557 case ISD::SHL: 6558 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 6559 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 6560 DAG.getConstant(Cnt, MVT::i32)); 6561 break; 6562 6563 case ISD::SRA: 6564 case ISD::SRL: 6565 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 6566 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
6567 ARMISD::VSHRs : ARMISD::VSHRu); 6568 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 6569 DAG.getConstant(Cnt, MVT::i32)); 6570 } 6571 } 6572 return SDValue(); 6573} 6574 6575/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 6576/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 6577static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 6578 const ARMSubtarget *ST) { 6579 SDValue N0 = N->getOperand(0); 6580 6581 // Check for sign- and zero-extensions of vector extract operations of 8- 6582 // and 16-bit vector elements. NEON supports these directly. They are 6583 // handled during DAG combining because type legalization will promote them 6584 // to 32-bit types and it is messy to recognize the operations after that. 6585 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6586 SDValue Vec = N0.getOperand(0); 6587 SDValue Lane = N0.getOperand(1); 6588 EVT VT = N->getValueType(0); 6589 EVT EltVT = N0.getValueType(); 6590 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6591 6592 if (VT == MVT::i32 && 6593 (EltVT == MVT::i8 || EltVT == MVT::i16) && 6594 TLI.isTypeLegal(Vec.getValueType()) && 6595 isa<ConstantSDNode>(Lane)) { 6596 6597 unsigned Opc = 0; 6598 switch (N->getOpcode()) { 6599 default: llvm_unreachable("unexpected opcode"); 6600 case ISD::SIGN_EXTEND: 6601 Opc = ARMISD::VGETLANEs; 6602 break; 6603 case ISD::ZERO_EXTEND: 6604 case ISD::ANY_EXTEND: 6605 Opc = ARMISD::VGETLANEu; 6606 break; 6607 } 6608 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 6609 } 6610 } 6611 6612 return SDValue(); 6613} 6614 6615/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 6616/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 6617static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 6618 const ARMSubtarget *ST) { 6619 // If the target supports NEON, try to use vmax/vmin instructions for f32 6620 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 6621 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 6622 // a NaN; only do the transformation when it matches that behavior. 6623 6624 // For now only do this when using NEON for FP operations; if using VFP, it 6625 // is not obvious that the benefit outweighs the cost of switching to the 6626 // NEON pipeline. 6627 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 6628 N->getValueType(0) != MVT::f32) 6629 return SDValue(); 6630 6631 SDValue CondLHS = N->getOperand(0); 6632 SDValue CondRHS = N->getOperand(1); 6633 SDValue LHS = N->getOperand(2); 6634 SDValue RHS = N->getOperand(3); 6635 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 6636 6637 unsigned Opcode = 0; 6638 bool IsReversed; 6639 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 6640 IsReversed = false; // x CC y ? x : y 6641 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 6642 IsReversed = true ; // x CC y ? y : x 6643 } else { 6644 return SDValue(); 6645 } 6646 6647 bool IsUnordered; 6648 switch (CC) { 6649 default: break; 6650 case ISD::SETOLT: 6651 case ISD::SETOLE: 6652 case ISD::SETLT: 6653 case ISD::SETLE: 6654 case ISD::SETULT: 6655 case ISD::SETULE: 6656 // If LHS is NaN, an ordered comparison will be false and the result will 6657 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 6658 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
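// For example, for (x < y) ? x : y with SETOLT, if x were NaN the select
// would produce y, while vmin(NaN, y) is NaN, so the transformation is only
// safe when x is known not to be NaN.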
6659 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 6660 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6661 break; 6662 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 6663 // will return -0, so vmin can only be used for unsafe math or if one of 6664 // the operands is known to be nonzero. 6665 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 6666 !UnsafeFPMath && 6667 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6668 break; 6669 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 6670 break; 6671 6672 case ISD::SETOGT: 6673 case ISD::SETOGE: 6674 case ISD::SETGT: 6675 case ISD::SETGE: 6676 case ISD::SETUGT: 6677 case ISD::SETUGE: 6678 // If LHS is NaN, an ordered comparison will be false and the result will 6679 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 6680 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 6681 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 6682 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6683 break; 6684 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 6685 // will return +0, so vmax can only be used for unsafe math or if one of 6686 // the operands is known to be nonzero. 6687 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 6688 !UnsafeFPMath && 6689 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6690 break; 6691 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 6692 break; 6693 } 6694 6695 if (!Opcode) 6696 return SDValue(); 6697 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 6698} 6699 6700SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 6701 DAGCombinerInfo &DCI) const { 6702 switch (N->getOpcode()) { 6703 default: break; 6704 case ISD::ADD: return PerformADDCombine(N, DCI); 6705 case ISD::SUB: return PerformSUBCombine(N, DCI); 6706 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 6707 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 6708 case ISD::AND: return PerformANDCombine(N, DCI); 6709 case ARMISD::BFI: return PerformBFICombine(N, DCI); 6710 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 6711 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 6712 case ISD::STORE: return PerformSTORECombine(N, DCI); 6713 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 6714 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 6715 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 6716 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 6717 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 6718 case ISD::SHL: 6719 case ISD::SRA: 6720 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 6721 case ISD::SIGN_EXTEND: 6722 case ISD::ZERO_EXTEND: 6723 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 6724 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 6725 case ARMISD::VLD2DUP: 6726 case ARMISD::VLD3DUP: 6727 case ARMISD::VLD4DUP: 6728 return CombineBaseUpdate(N, DCI); 6729 case ISD::INTRINSIC_VOID: 6730 case ISD::INTRINSIC_W_CHAIN: 6731 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 6732 case Intrinsic::arm_neon_vld1: 6733 case Intrinsic::arm_neon_vld2: 6734 case Intrinsic::arm_neon_vld3: 6735 case Intrinsic::arm_neon_vld4: 6736 case Intrinsic::arm_neon_vld2lane: 6737 case Intrinsic::arm_neon_vld3lane: 6738 case 
Intrinsic::arm_neon_vld4lane: 6739 case Intrinsic::arm_neon_vst1: 6740 case Intrinsic::arm_neon_vst2: 6741 case Intrinsic::arm_neon_vst3: 6742 case Intrinsic::arm_neon_vst4: 6743 case Intrinsic::arm_neon_vst2lane: 6744 case Intrinsic::arm_neon_vst3lane: 6745 case Intrinsic::arm_neon_vst4lane: 6746 return CombineBaseUpdate(N, DCI); 6747 default: break; 6748 } 6749 break; 6750 } 6751 return SDValue(); 6752} 6753 6754bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 6755 EVT VT) const { 6756 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 6757} 6758 6759bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 6760 if (!Subtarget->allowsUnalignedMem()) 6761 return false; 6762 6763 switch (VT.getSimpleVT().SimpleTy) { 6764 default: 6765 return false; 6766 case MVT::i8: 6767 case MVT::i16: 6768 case MVT::i32: 6769 return true; 6770 // FIXME: VLD1 etc with standard alignment is legal. 6771 } 6772} 6773 6774static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 6775 if (V < 0) 6776 return false; 6777 6778 unsigned Scale = 1; 6779 switch (VT.getSimpleVT().SimpleTy) { 6780 default: return false; 6781 case MVT::i1: 6782 case MVT::i8: 6783 // Scale == 1; 6784 break; 6785 case MVT::i16: 6786 // Scale == 2; 6787 Scale = 2; 6788 break; 6789 case MVT::i32: 6790 // Scale == 4; 6791 Scale = 4; 6792 break; 6793 } 6794 6795 if ((V & (Scale - 1)) != 0) 6796 return false; 6797 V /= Scale; 6798 return V == (V & ((1LL << 5) - 1)); 6799} 6800 6801static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 6802 const ARMSubtarget *Subtarget) { 6803 bool isNeg = false; 6804 if (V < 0) { 6805 isNeg = true; 6806 V = - V; 6807 } 6808 6809 switch (VT.getSimpleVT().SimpleTy) { 6810 default: return false; 6811 case MVT::i1: 6812 case MVT::i8: 6813 case MVT::i16: 6814 case MVT::i32: 6815 // + imm12 or - imm8 6816 if (isNeg) 6817 return V == (V & ((1LL << 8) - 1)); 6818 return V == (V & ((1LL << 12) - 1)); 6819 case MVT::f32: 6820 case MVT::f64: 6821 // Same as ARM mode. FIXME: NEON? 6822 if (!Subtarget->hasVFP2()) 6823 return false; 6824 if ((V & 3) != 0) 6825 return false; 6826 V >>= 2; 6827 return V == (V & ((1LL << 8) - 1)); 6828 } 6829} 6830 6831/// isLegalAddressImmediate - Return true if the integer value can be used 6832/// as the offset of the target addressing mode for load / store of the 6833/// given type. 6834static bool isLegalAddressImmediate(int64_t V, EVT VT, 6835 const ARMSubtarget *Subtarget) { 6836 if (V == 0) 6837 return true; 6838 6839 if (!VT.isSimple()) 6840 return false; 6841 6842 if (Subtarget->isThumb1Only()) 6843 return isLegalT1AddressImmediate(V, VT); 6844 else if (Subtarget->isThumb2()) 6845 return isLegalT2AddressImmediate(V, VT, Subtarget); 6846 6847 // ARM mode. 6848 if (V < 0) 6849 V = - V; 6850 switch (VT.getSimpleVT().SimpleTy) { 6851 default: return false; 6852 case MVT::i1: 6853 case MVT::i8: 6854 case MVT::i32: 6855 // +- imm12 6856 return V == (V & ((1LL << 12) - 1)); 6857 case MVT::i16: 6858 // +- imm8 6859 return V == (V & ((1LL << 8) - 1)); 6860 case MVT::f32: 6861 case MVT::f64: 6862 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
6863 return false; 6864 if ((V & 3) != 0) 6865 return false; 6866 V >>= 2; 6867 return V == (V & ((1LL << 8) - 1)); 6868 } 6869} 6870 6871bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 6872 EVT VT) const { 6873 int Scale = AM.Scale; 6874 if (Scale < 0) 6875 return false; 6876 6877 switch (VT.getSimpleVT().SimpleTy) { 6878 default: return false; 6879 case MVT::i1: 6880 case MVT::i8: 6881 case MVT::i16: 6882 case MVT::i32: 6883 if (Scale == 1) 6884 return true; 6885 // r + r << imm 6886 Scale = Scale & ~1; 6887 return Scale == 2 || Scale == 4 || Scale == 8; 6888 case MVT::i64: 6889 // r + r 6890 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6891 return true; 6892 return false; 6893 case MVT::isVoid: 6894 // Note, we allow "void" uses (basically, uses that aren't loads or 6895 // stores), because arm allows folding a scale into many arithmetic 6896 // operations. This should be made more precise and revisited later. 6897 6898 // Allow r << imm, but the imm has to be a multiple of two. 6899 if (Scale & 1) return false; 6900 return isPowerOf2_32(Scale); 6901 } 6902} 6903 6904/// isLegalAddressingMode - Return true if the addressing mode represented 6905/// by AM is legal for this target, for a load/store of the specified type. 6906bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 6907 const Type *Ty) const { 6908 EVT VT = getValueType(Ty, true); 6909 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 6910 return false; 6911 6912 // Can never fold addr of global into load/store. 6913 if (AM.BaseGV) 6914 return false; 6915 6916 switch (AM.Scale) { 6917 case 0: // no scale reg, must be "r+i" or "r", or "i". 6918 break; 6919 case 1: 6920 if (Subtarget->isThumb1Only()) 6921 return false; 6922 // FALL THROUGH. 6923 default: 6924 // ARM doesn't support any R+R*scale+imm addr modes. 6925 if (AM.BaseOffs) 6926 return false; 6927 6928 if (!VT.isSimple()) 6929 return false; 6930 6931 if (Subtarget->isThumb2()) 6932 return isLegalT2ScaledAddressingMode(AM, VT); 6933 6934 int Scale = AM.Scale; 6935 switch (VT.getSimpleVT().SimpleTy) { 6936 default: return false; 6937 case MVT::i1: 6938 case MVT::i8: 6939 case MVT::i32: 6940 if (Scale < 0) Scale = -Scale; 6941 if (Scale == 1) 6942 return true; 6943 // r + r << imm 6944 return isPowerOf2_32(Scale & ~1); 6945 case MVT::i16: 6946 case MVT::i64: 6947 // r + r 6948 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6949 return true; 6950 return false; 6951 6952 case MVT::isVoid: 6953 // Note, we allow "void" uses (basically, uses that aren't loads or 6954 // stores), because arm allows folding a scale into many arithmetic 6955 // operations. This should be made more precise and revisited later. 6956 6957 // Allow r << imm, but the imm has to be a multiple of two. 6958 if (Scale & 1) return false; 6959 return isPowerOf2_32(Scale); 6960 } 6961 break; 6962 } 6963 return true; 6964} 6965 6966/// isLegalICmpImmediate - Return true if the specified immediate is legal 6967/// icmp immediate, that is the target has icmp instructions which can compare 6968/// a register against the immediate without having to materialize the 6969/// immediate into a register. 
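/// For example, in ARM mode an immediate such as 255 or 0xff000000 can be
/// encoded as a rotated 8-bit value and is therefore legal, while an
/// arbitrary constant like 0x12345678 is not.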
6970bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 6971 if (!Subtarget->isThumb()) 6972 return ARM_AM::getSOImmVal(Imm) != -1; 6973 if (Subtarget->isThumb2()) 6974 return ARM_AM::getT2SOImmVal(Imm) != -1; 6975 return Imm >= 0 && Imm <= 255; 6976} 6977 6978/// isLegalAddImmediate - Return true if the specified immediate is legal 6979/// add immediate, that is the target has add instructions which can add 6980/// a register with the immediate without having to materialize the 6981/// immediate into a register. 6982bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 6983 return ARM_AM::getSOImmVal(Imm) != -1; 6984} 6985 6986static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 6987 bool isSEXTLoad, SDValue &Base, 6988 SDValue &Offset, bool &isInc, 6989 SelectionDAG &DAG) { 6990 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 6991 return false; 6992 6993 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 6994 // AddressingMode 3 6995 Base = Ptr->getOperand(0); 6996 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 6997 int RHSC = (int)RHS->getZExtValue(); 6998 if (RHSC < 0 && RHSC > -256) { 6999 assert(Ptr->getOpcode() == ISD::ADD); 7000 isInc = false; 7001 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7002 return true; 7003 } 7004 } 7005 isInc = (Ptr->getOpcode() == ISD::ADD); 7006 Offset = Ptr->getOperand(1); 7007 return true; 7008 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 7009 // AddressingMode 2 7010 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7011 int RHSC = (int)RHS->getZExtValue(); 7012 if (RHSC < 0 && RHSC > -0x1000) { 7013 assert(Ptr->getOpcode() == ISD::ADD); 7014 isInc = false; 7015 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7016 Base = Ptr->getOperand(0); 7017 return true; 7018 } 7019 } 7020 7021 if (Ptr->getOpcode() == ISD::ADD) { 7022 isInc = true; 7023 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 7024 if (ShOpcVal != ARM_AM::no_shift) { 7025 Base = Ptr->getOperand(1); 7026 Offset = Ptr->getOperand(0); 7027 } else { 7028 Base = Ptr->getOperand(0); 7029 Offset = Ptr->getOperand(1); 7030 } 7031 return true; 7032 } 7033 7034 isInc = (Ptr->getOpcode() == ISD::ADD); 7035 Base = Ptr->getOperand(0); 7036 Offset = Ptr->getOperand(1); 7037 return true; 7038 } 7039 7040 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 7041 return false; 7042} 7043 7044static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 7045 bool isSEXTLoad, SDValue &Base, 7046 SDValue &Offset, bool &isInc, 7047 SelectionDAG &DAG) { 7048 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 7049 return false; 7050 7051 Base = Ptr->getOperand(0); 7052 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7053 int RHSC = (int)RHS->getZExtValue(); 7054 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 7055 assert(Ptr->getOpcode() == ISD::ADD); 7056 isInc = false; 7057 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7058 return true; 7059 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 
7060 isInc = Ptr->getOpcode() == ISD::ADD; 7061 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 7062 return true; 7063 } 7064 } 7065 7066 return false; 7067} 7068 7069/// getPreIndexedAddressParts - returns true by value, base pointer and 7070/// offset pointer and addressing mode by reference if the node's address 7071/// can be legally represented as pre-indexed load / store address. 7072bool 7073ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 7074 SDValue &Offset, 7075 ISD::MemIndexedMode &AM, 7076 SelectionDAG &DAG) const { 7077 if (Subtarget->isThumb1Only()) 7078 return false; 7079 7080 EVT VT; 7081 SDValue Ptr; 7082 bool isSEXTLoad = false; 7083 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7084 Ptr = LD->getBasePtr(); 7085 VT = LD->getMemoryVT(); 7086 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7087 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7088 Ptr = ST->getBasePtr(); 7089 VT = ST->getMemoryVT(); 7090 } else 7091 return false; 7092 7093 bool isInc; 7094 bool isLegal = false; 7095 if (Subtarget->isThumb2()) 7096 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7097 Offset, isInc, DAG); 7098 else 7099 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7100 Offset, isInc, DAG); 7101 if (!isLegal) 7102 return false; 7103 7104 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 7105 return true; 7106} 7107 7108/// getPostIndexedAddressParts - returns true by value, base pointer and 7109/// offset pointer and addressing mode by reference if this node can be 7110/// combined with a load / store to form a post-indexed load / store. 7111bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 7112 SDValue &Base, 7113 SDValue &Offset, 7114 ISD::MemIndexedMode &AM, 7115 SelectionDAG &DAG) const { 7116 if (Subtarget->isThumb1Only()) 7117 return false; 7118 7119 EVT VT; 7120 SDValue Ptr; 7121 bool isSEXTLoad = false; 7122 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7123 VT = LD->getMemoryVT(); 7124 Ptr = LD->getBasePtr(); 7125 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7126 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7127 VT = ST->getMemoryVT(); 7128 Ptr = ST->getBasePtr(); 7129 } else 7130 return false; 7131 7132 bool isInc; 7133 bool isLegal = false; 7134 if (Subtarget->isThumb2()) 7135 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7136 isInc, DAG); 7137 else 7138 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7139 isInc, DAG); 7140 if (!isLegal) 7141 return false; 7142 7143 if (Ptr != Base) { 7144 // Swap base ptr and offset to catch more post-index load / store when 7145 // it's legal. In Thumb2 mode, offset must be an immediate. 7146 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 7147 !Subtarget->isThumb2()) 7148 std::swap(Base, Offset); 7149 7150 // Post-indexed load / store update the base pointer. 7151 if (Ptr != Base) 7152 return false; 7153 } 7154 7155 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 7156 return true; 7157} 7158 7159void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 7160 const APInt &Mask, 7161 APInt &KnownZero, 7162 APInt &KnownOne, 7163 const SelectionDAG &DAG, 7164 unsigned Depth) const { 7165 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 7166 switch (Op.getOpcode()) { 7167 default: break; 7168 case ARMISD::CMOV: { 7169 // Bits are known zero/one if known on the LHS and RHS. 
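// CMOV selects either its first or second operand at run time, so a bit can
// only be reported as known if it is known to the same value in both.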
7170 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 7171 if (KnownZero == 0 && KnownOne == 0) return; 7172 7173 APInt KnownZeroRHS, KnownOneRHS; 7174 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 7175 KnownZeroRHS, KnownOneRHS, Depth+1); 7176 KnownZero &= KnownZeroRHS; 7177 KnownOne &= KnownOneRHS; 7178 return; 7179 } 7180 } 7181} 7182 7183//===----------------------------------------------------------------------===// 7184// ARM Inline Assembly Support 7185//===----------------------------------------------------------------------===// 7186 7187bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 7188 // Looking for "rev" which is V6+. 7189 if (!Subtarget->hasV6Ops()) 7190 return false; 7191 7192 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 7193 std::string AsmStr = IA->getAsmString(); 7194 SmallVector<StringRef, 4> AsmPieces; 7195 SplitString(AsmStr, AsmPieces, ";\n"); 7196 7197 switch (AsmPieces.size()) { 7198 default: return false; 7199 case 1: 7200 AsmStr = AsmPieces[0]; 7201 AsmPieces.clear(); 7202 SplitString(AsmStr, AsmPieces, " \t,"); 7203 7204 // rev $0, $1 7205 if (AsmPieces.size() == 3 && 7206 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 7207 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 7208 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 7209 if (Ty && Ty->getBitWidth() == 32) 7210 return IntrinsicLowering::LowerToByteSwap(CI); 7211 } 7212 break; 7213 } 7214 7215 return false; 7216} 7217 7218/// getConstraintType - Given a constraint letter, return the type of 7219/// constraint it is for this target. 7220ARMTargetLowering::ConstraintType 7221ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 7222 if (Constraint.size() == 1) { 7223 switch (Constraint[0]) { 7224 default: break; 7225 case 'l': return C_RegisterClass; 7226 case 'w': return C_RegisterClass; 7227 } 7228 } 7229 return TargetLowering::getConstraintType(Constraint); 7230} 7231 7232/// Examine constraint type and operand type and determine a weight value. 7233/// This object must already have been set up with the operand type 7234/// and the current alternative constraint selected. 7235TargetLowering::ConstraintWeight 7236ARMTargetLowering::getSingleConstraintMatchWeight( 7237 AsmOperandInfo &info, const char *constraint) const { 7238 ConstraintWeight weight = CW_Invalid; 7239 Value *CallOperandVal = info.CallOperandVal; 7240 // If we don't have a value, we can't do a match, 7241 // but allow it at the lowest weight. 7242 if (CallOperandVal == NULL) 7243 return CW_Default; 7244 const Type *type = CallOperandVal->getType(); 7245 // Look at the constraint type. 
7246 switch (*constraint) { 7247 default: 7248 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 7249 break; 7250 case 'l': 7251 if (type->isIntegerTy()) { 7252 if (Subtarget->isThumb()) 7253 weight = CW_SpecificReg; 7254 else 7255 weight = CW_Register; 7256 } 7257 break; 7258 case 'w': 7259 if (type->isFloatingPointTy()) 7260 weight = CW_Register; 7261 break; 7262 } 7263 return weight; 7264} 7265 7266std::pair<unsigned, const TargetRegisterClass*> 7267ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 7268 EVT VT) const { 7269 if (Constraint.size() == 1) { 7270 // GCC ARM Constraint Letters 7271 switch (Constraint[0]) { 7272 case 'l': 7273 if (Subtarget->isThumb()) 7274 return std::make_pair(0U, ARM::tGPRRegisterClass); 7275 else 7276 return std::make_pair(0U, ARM::GPRRegisterClass); 7277 case 'r': 7278 return std::make_pair(0U, ARM::GPRRegisterClass); 7279 case 'w': 7280 if (VT == MVT::f32) 7281 return std::make_pair(0U, ARM::SPRRegisterClass); 7282 if (VT.getSizeInBits() == 64) 7283 return std::make_pair(0U, ARM::DPRRegisterClass); 7284 if (VT.getSizeInBits() == 128) 7285 return std::make_pair(0U, ARM::QPRRegisterClass); 7286 break; 7287 } 7288 } 7289 if (StringRef("{cc}").equals_lower(Constraint)) 7290 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 7291 7292 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 7293} 7294 7295std::vector<unsigned> ARMTargetLowering:: 7296getRegClassForInlineAsmConstraint(const std::string &Constraint, 7297 EVT VT) const { 7298 if (Constraint.size() != 1) 7299 return std::vector<unsigned>(); 7300 7301 switch (Constraint[0]) { // GCC ARM Constraint Letters 7302 default: break; 7303 case 'l': 7304 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 7305 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 7306 0); 7307 case 'r': 7308 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 7309 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 7310 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 7311 ARM::R12, ARM::LR, 0); 7312 case 'w': 7313 if (VT == MVT::f32) 7314 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 7315 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 7316 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 7317 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 7318 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 7319 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 7320 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 7321 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 7322 if (VT.getSizeInBits() == 64) 7323 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 7324 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 7325 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 7326 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 7327 if (VT.getSizeInBits() == 128) 7328 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 7329 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 7330 break; 7331 } 7332 7333 return std::vector<unsigned>(); 7334} 7335 7336/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 7337/// vector. If it is invalid, don't add anything to Ops. 
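/// Each of the constraints handled here ('I' through 'O') accepts only a
/// compile-time constant in a target-specific range; a non-constant or
/// out-of-range operand adds nothing to Ops.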
7338void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 7339 char Constraint, 7340 std::vector<SDValue>&Ops, 7341 SelectionDAG &DAG) const { 7342 SDValue Result(0, 0); 7343 7344 switch (Constraint) { 7345 default: break; 7346 case 'I': case 'J': case 'K': case 'L': 7347 case 'M': case 'N': case 'O': 7348 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 7349 if (!C) 7350 return; 7351 7352 int64_t CVal64 = C->getSExtValue(); 7353 int CVal = (int) CVal64; 7354 // None of these constraints allow values larger than 32 bits. Check 7355 // that the value fits in an int. 7356 if (CVal != CVal64) 7357 return; 7358 7359 switch (Constraint) { 7360 case 'I': 7361 if (Subtarget->isThumb1Only()) { 7362 // This must be a constant between 0 and 255, for ADD 7363 // immediates. 7364 if (CVal >= 0 && CVal <= 255) 7365 break; 7366 } else if (Subtarget->isThumb2()) { 7367 // A constant that can be used as an immediate value in a 7368 // data-processing instruction. 7369 if (ARM_AM::getT2SOImmVal(CVal) != -1) 7370 break; 7371 } else { 7372 // A constant that can be used as an immediate value in a 7373 // data-processing instruction. 7374 if (ARM_AM::getSOImmVal(CVal) != -1) 7375 break; 7376 } 7377 return; 7378 7379 case 'J': 7380 if (Subtarget->isThumb()) { // FIXME thumb2 7381 // This must be a constant between -255 and -1, for negated ADD 7382 // immediates. This can be used in GCC with an "n" modifier that 7383 // prints the negated value, for use with SUB instructions. It is 7384 // not useful otherwise but is implemented for compatibility. 7385 if (CVal >= -255 && CVal <= -1) 7386 break; 7387 } else { 7388 // This must be a constant between -4095 and 4095. It is not clear 7389 // what this constraint is intended for. Implemented for 7390 // compatibility with GCC. 7391 if (CVal >= -4095 && CVal <= 4095) 7392 break; 7393 } 7394 return; 7395 7396 case 'K': 7397 if (Subtarget->isThumb1Only()) { 7398 // A 32-bit value where only one byte has a nonzero value. Exclude 7399 // zero to match GCC. This constraint is used by GCC internally for 7400 // constants that can be loaded with a move/shift combination. 7401 // It is not useful otherwise but is implemented for compatibility. 7402 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 7403 break; 7404 } else if (Subtarget->isThumb2()) { 7405 // A constant whose bitwise inverse can be used as an immediate 7406 // value in a data-processing instruction. This can be used in GCC 7407 // with a "B" modifier that prints the inverted value, for use with 7408 // BIC and MVN instructions. It is not useful otherwise but is 7409 // implemented for compatibility. 7410 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 7411 break; 7412 } else { 7413 // A constant whose bitwise inverse can be used as an immediate 7414 // value in a data-processing instruction. This can be used in GCC 7415 // with a "B" modifier that prints the inverted value, for use with 7416 // BIC and MVN instructions. It is not useful otherwise but is 7417 // implemented for compatibility. 7418 if (ARM_AM::getSOImmVal(~CVal) != -1) 7419 break; 7420 } 7421 return; 7422 7423 case 'L': 7424 if (Subtarget->isThumb1Only()) { 7425 // This must be a constant between -7 and 7, 7426 // for 3-operand ADD/SUB immediate instructions. 7427 if (CVal >= -7 && CVal < 7) 7428 break; 7429 } else if (Subtarget->isThumb2()) { 7430 // A constant whose negation can be used as an immediate value in a 7431 // data-processing instruction. 
          // This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;   // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
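  // A couple of worked examples (illustrative only): 1.0 (exponent 0,
  // fraction 0) encodes as 0x70 and 0.5 as 0x60, while 31.0 (1.1111 * 2^4)
  // encodes as 0x3f. Any value needing more than the top four fraction bits,
  // or an exponent outside [-3, 4], is rejected below by returning -1.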
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;
  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return false;
  }
  return true;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
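    // For example (illustrative IR, assuming the usual type-overloaded
    // intrinsic naming), a call such as
    //   call void @llvm.arm.neon.vst2.v4i32(i8* %p, <4 x i32> %a,
    //                                       <4 x i32> %b, i32 16)
    // stores two 16-byte vectors, so NumElts becomes 4 below and memVT is
    // reported as v4i64 (32 bytes), with ptrVal == %p and align == 16.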
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}