ARMISelLowering.cpp revision de64aaf6c8ab3a170b2e5a5b0968595503b5aad4
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
"__aeabi_fdiv"); 291 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 292 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 293 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 294 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 295 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 296 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 297 298 // Single-precision floating-point comparison helper functions 299 // RTABI chapter 4.1.2, Table 5 300 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 301 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 302 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 303 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 304 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 305 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 306 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 307 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 308 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 309 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 310 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 311 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 312 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 313 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 314 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 315 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 316 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 317 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 318 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 319 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 320 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 321 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 322 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 323 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 324 325 // Floating-point to integer conversions. 326 // RTABI chapter 4.1.2, Table 6 327 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 328 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 329 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 330 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 331 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 332 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 333 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 334 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 335 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 342 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 343 344 // Conversions between floating types. 345 // RTABI chapter 4.1.2, Table 7 346 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 347 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 348 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 349 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 350 351 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops())
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
  case ARMISD::VSHRN: return "ARMISD::VSHRN";
"ARMISD::VSHRN"; 856 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 857 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 858 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 859 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 860 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 861 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 862 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 863 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 864 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 865 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 866 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 867 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 868 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 869 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 870 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 871 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 872 case ARMISD::VDUP: return "ARMISD::VDUP"; 873 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 874 case ARMISD::VEXT: return "ARMISD::VEXT"; 875 case ARMISD::VREV64: return "ARMISD::VREV64"; 876 case ARMISD::VREV32: return "ARMISD::VREV32"; 877 case ARMISD::VREV16: return "ARMISD::VREV16"; 878 case ARMISD::VZIP: return "ARMISD::VZIP"; 879 case ARMISD::VUZP: return "ARMISD::VUZP"; 880 case ARMISD::VTRN: return "ARMISD::VTRN"; 881 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 882 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 883 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 884 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 885 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 886 case ARMISD::FMAX: return "ARMISD::FMAX"; 887 case ARMISD::FMIN: return "ARMISD::FMIN"; 888 case ARMISD::BFI: return "ARMISD::BFI"; 889 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 890 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 891 case ARMISD::VBSL: return "ARMISD::VBSL"; 892 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 893 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 894 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 895 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 896 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 897 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 898 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 899 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 900 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 901 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 902 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 903 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 904 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 905 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 906 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 907 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 908 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 909 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 910 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 911 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 912 } 913} 914 915/// getRegClassFor - Return the register class that should be used for the 916/// specified value type. 917TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 918 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 919 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 920 // load / store 4 to 8 consecutive D registers. 
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.setCallOrPrologue(Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      if (CCInfo.isFirstByValRegValid()) {
        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
        unsigned int i, j;
        for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }
        offset = ARM::R4 - CCInfo.getFirstByValReg();
        CCInfo.clearFirstByValReg();
      }

      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
      SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
                                StkPtrOff);
      SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
      SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
                                         MVT::i32);
      MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                                          Flags.getByValAlign(),
                                          /*isVolatile=*/false,
                                          /*AlwaysInline=*/false,
                                          MachinePointerInfo(0),
                                          MachinePointerInfo(0)));

    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
1338 SDValue InFlag; 1339 // Tail call byval lowering might overwrite argument registers so in case of 1340 // tail call optimization the copies to registers are lowered later. 1341 if (!isTailCall) 1342 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1343 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1344 RegsToPass[i].second, InFlag); 1345 InFlag = Chain.getValue(1); 1346 } 1347 1348 // For tail calls lower the arguments to the 'real' stack slot. 1349 if (isTailCall) { 1350 // Force all the incoming stack arguments to be loaded from the stack 1351 // before any new outgoing arguments are stored to the stack, because the 1352 // outgoing stack slots may alias the incoming argument stack slots, and 1353 // the alias isn't otherwise explicit. This is slightly more conservative 1354 // than necessary, because it means that each store effectively depends 1355 // on every argument instead of just those arguments it would clobber. 1356 1357 // Do not flag preceding copytoreg stuff together with the following stuff. 1358 InFlag = SDValue(); 1359 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1360 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1361 RegsToPass[i].second, InFlag); 1362 InFlag = Chain.getValue(1); 1363 } 1364 InFlag =SDValue(); 1365 } 1366 1367 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1368 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1369 // node so that legalize doesn't hack it. 1370 bool isDirect = false; 1371 bool isARMFunc = false; 1372 bool isLocalARMFunc = false; 1373 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1374 1375 if (EnableARMLongCalls) { 1376 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1377 && "long-calls with non-static relocation model!"); 1378 // Handle a global address or an external symbol. If it's not one of 1379 // those, the target's already in a register, so we don't need to do 1380 // anything extra. 
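    // Either way the result has the same shape (sketch): the callee address is
    // placed in a constant-pool entry, loaded through an ARMISD::Wrapper node,
    // and the call then goes through that register, which is what lets a
    // "long call" reach any address.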
1381 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1382 const GlobalValue *GV = G->getGlobal(); 1383 // Create a constant pool entry for the callee address 1384 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1385 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1386 ARMPCLabelIndex, 1387 ARMCP::CPValue, 0); 1388 // Get the address of the callee into a register 1389 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1390 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1391 Callee = DAG.getLoad(getPointerTy(), dl, 1392 DAG.getEntryNode(), CPAddr, 1393 MachinePointerInfo::getConstantPool(), 1394 false, false, 0); 1395 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1396 const char *Sym = S->getSymbol(); 1397 1398 // Create a constant pool entry for the callee address 1399 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1400 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1401 Sym, ARMPCLabelIndex, 0); 1402 // Get the address of the callee into a register 1403 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1404 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1405 Callee = DAG.getLoad(getPointerTy(), dl, 1406 DAG.getEntryNode(), CPAddr, 1407 MachinePointerInfo::getConstantPool(), 1408 false, false, 0); 1409 } 1410 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1411 const GlobalValue *GV = G->getGlobal(); 1412 isDirect = true; 1413 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1414 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1415 getTargetMachine().getRelocationModel() != Reloc::Static; 1416 isARMFunc = !Subtarget->isThumb() || isStub; 1417 // ARM call to a local ARM function is predicable. 1418 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1419 // tBX takes a register source operand. 1420 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1421 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1422 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1423 ARMPCLabelIndex, 1424 ARMCP::CPValue, 4); 1425 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1426 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1427 Callee = DAG.getLoad(getPointerTy(), dl, 1428 DAG.getEntryNode(), CPAddr, 1429 MachinePointerInfo::getConstantPool(), 1430 false, false, 0); 1431 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1432 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1433 getPointerTy(), Callee, PICLabel); 1434 } else { 1435 // On ELF targets for PIC code, direct calls should go through the PLT 1436 unsigned OpFlags = 0; 1437 if (Subtarget->isTargetELF() && 1438 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1439 OpFlags = ARMII::MO_PLT; 1440 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1441 } 1442 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1443 isDirect = true; 1444 bool isStub = Subtarget->isTargetDarwin() && 1445 getTargetMachine().getRelocationModel() != Reloc::Static; 1446 isARMFunc = !Subtarget->isThumb() || isStub; 1447 // tBX takes a register source operand. 
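    // (Same pattern as the global-address case above: without v5T on Thumb1
    // the symbol's address is loaded from the constant pool and adjusted with
    // a PIC_ADD so the call can be made through a register.)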
1448 const char *Sym = S->getSymbol(); 1449 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1450 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1451 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1452 Sym, ARMPCLabelIndex, 4); 1453 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1454 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1455 Callee = DAG.getLoad(getPointerTy(), dl, 1456 DAG.getEntryNode(), CPAddr, 1457 MachinePointerInfo::getConstantPool(), 1458 false, false, 0); 1459 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1460 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1461 getPointerTy(), Callee, PICLabel); 1462 } else { 1463 unsigned OpFlags = 0; 1464 // On ELF targets for PIC code, direct calls should go through the PLT 1465 if (Subtarget->isTargetELF() && 1466 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1467 OpFlags = ARMII::MO_PLT; 1468 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1469 } 1470 } 1471 1472 // FIXME: handle tail calls differently. 1473 unsigned CallOpc; 1474 if (Subtarget->isThumb()) { 1475 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1476 CallOpc = ARMISD::CALL_NOLINK; 1477 else 1478 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1479 } else { 1480 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1481 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1482 : ARMISD::CALL_NOLINK; 1483 } 1484 1485 std::vector<SDValue> Ops; 1486 Ops.push_back(Chain); 1487 Ops.push_back(Callee); 1488 1489 // Add argument registers to the end of the list so that they are known live 1490 // into the call. 1491 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1492 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1493 RegsToPass[i].second.getValueType())); 1494 1495 if (InFlag.getNode()) 1496 Ops.push_back(InFlag); 1497 1498 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1499 if (isTailCall) 1500 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1501 1502 // Returns a chain and a flag for retval copy to use. 1503 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1504 InFlag = Chain.getValue(1); 1505 1506 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1507 DAG.getIntPtrConstant(0, true), InFlag); 1508 if (!Ins.empty()) 1509 InFlag = Chain.getValue(1); 1510 1511 // Handle result values, copying them out of physregs into vregs that we 1512 // return. 1513 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1514 dl, DAG, InVals); 1515} 1516 1517/// HandleByVal - Every parameter *after* a byval parameter is passed 1518/// on the stack. Remember the next parameter register to allocate, 1519/// and then confiscate the rest of the parameter registers to insure 1520/// this. 1521void 1522llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const { 1523 unsigned reg = State->AllocateReg(GPRArgRegs, 4); 1524 assert((State->getCallOrPrologue() == Prologue || 1525 State->getCallOrPrologue() == Call) && 1526 "unhandled ParmContext"); 1527 if ((!State->isFirstByValRegValid()) && 1528 (ARM::R0 <= reg) && (reg <= ARM::R3)) { 1529 State->setFirstByValReg(reg); 1530 // At a call site, a byval parameter that is split between 1531 // registers and memory needs its size truncated here. In a 1532 // function prologue, such byval parameters are reassembled in 1533 // memory, and are not truncated. 
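    // Worked example (illustrative): a 24-byte byval whose first register is
    // R2 has 4 * (R4 - R2) = 8 bytes passed in R2/R3, so at a call site the
    // stack portion handled below shrinks from 24 to 16 bytes.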
1534 if (State->getCallOrPrologue() == Call) { 1535 unsigned excess = 4 * (ARM::R4 - reg); 1536 assert(size >= excess && "expected larger existing stack allocation"); 1537 size -= excess; 1538 } 1539 } 1540 // Confiscate any remaining parameter registers to preclude their 1541 // assignment to subsequent parameters. 1542 while (State->AllocateReg(GPRArgRegs, 4)) 1543 ; 1544} 1545 1546/// MatchingStackOffset - Return true if the given stack call argument is 1547/// already available in the same position (relatively) of the caller's 1548/// incoming argument stack. 1549static 1550bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1551 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1552 const ARMInstrInfo *TII) { 1553 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1554 int FI = INT_MAX; 1555 if (Arg.getOpcode() == ISD::CopyFromReg) { 1556 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1557 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1558 return false; 1559 MachineInstr *Def = MRI->getVRegDef(VR); 1560 if (!Def) 1561 return false; 1562 if (!Flags.isByVal()) { 1563 if (!TII->isLoadFromStackSlot(Def, FI)) 1564 return false; 1565 } else { 1566 return false; 1567 } 1568 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1569 if (Flags.isByVal()) 1570 // ByVal argument is passed in as a pointer but it's now being 1571 // dereferenced. e.g. 1572 // define @foo(%struct.X* %A) { 1573 // tail call @bar(%struct.X* byval %A) 1574 // } 1575 return false; 1576 SDValue Ptr = Ld->getBasePtr(); 1577 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1578 if (!FINode) 1579 return false; 1580 FI = FINode->getIndex(); 1581 } else 1582 return false; 1583 1584 assert(FI != INT_MAX); 1585 if (!MFI->isFixedObjectIndex(FI)) 1586 return false; 1587 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1588} 1589 1590/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1591/// for tail call optimization. Targets which want to do tail call 1592/// optimization should implement this function. 1593bool 1594ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1595 CallingConv::ID CalleeCC, 1596 bool isVarArg, 1597 bool isCalleeStructRet, 1598 bool isCallerStructRet, 1599 const SmallVectorImpl<ISD::OutputArg> &Outs, 1600 const SmallVectorImpl<SDValue> &OutVals, 1601 const SmallVectorImpl<ISD::InputArg> &Ins, 1602 SelectionDAG& DAG) const { 1603 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1604 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1605 bool CCMatch = CallerCC == CalleeCC; 1606 1607 // Look for obvious safe cases to perform tail call optimization that do not 1608 // require ABI changes. This is what gcc calls sibcall. 1609 1610 // Do not sibcall optimize vararg calls unless the call site is not passing 1611 // any arguments. 1612 if (isVarArg && !Outs.empty()) 1613 return false; 1614 1615 // Also avoid sibcall optimization if either caller or callee uses struct 1616 // return semantics. 1617 if (isCalleeStructRet || isCallerStructRet) 1618 return false; 1619 1620 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1621 // emitEpilogue is not ready for them. 1622 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1623 // LR. 
This means if we need to reload LR, it takes an extra instruction, 1624 // which outweighs the value of the tail call; but here we don't know yet 1625 // whether LR is going to be used. Probably the right approach is to 1626 // generate the tail call here and turn it back into CALL/RET in 1627 // emitEpilogue if LR is used. 1628 1629 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1630 // but we need to make sure there are enough registers; the only valid 1631 // registers are the 4 used for parameters. We don't currently do this 1632 // case. 1633 if (Subtarget->isThumb1Only()) 1634 return false; 1635 1636 // If the calling conventions do not match, then we'd better make sure the 1637 // results are returned in the same way as what the caller expects. 1638 if (!CCMatch) { 1639 SmallVector<CCValAssign, 16> RVLocs1; 1640 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 1641 RVLocs1, *DAG.getContext()); 1642 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1643 1644 SmallVector<CCValAssign, 16> RVLocs2; 1645 CCState CCInfo2(CallerCC, false, getTargetMachine(), 1646 RVLocs2, *DAG.getContext()); 1647 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1648 1649 if (RVLocs1.size() != RVLocs2.size()) 1650 return false; 1651 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1652 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1653 return false; 1654 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1655 return false; 1656 if (RVLocs1[i].isRegLoc()) { 1657 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1658 return false; 1659 } else { 1660 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1661 return false; 1662 } 1663 } 1664 } 1665 1666 // If the callee takes no arguments then go on to check the results of the 1667 // call. 1668 if (!Outs.empty()) { 1669 // Check if stack adjustment is needed. For now, do not do this if any 1670 // argument is passed on the stack. 1671 SmallVector<CCValAssign, 16> ArgLocs; 1672 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 1673 ArgLocs, *DAG.getContext()); 1674 CCInfo.AnalyzeCallOperands(Outs, 1675 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1676 if (CCInfo.getNextStackOffset()) { 1677 MachineFunction &MF = DAG.getMachineFunction(); 1678 1679 // Check if the arguments are already laid out in the same way as 1680 // the caller's fixed stack objects. 1681 MachineFrameInfo *MFI = MF.getFrameInfo(); 1682 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1683 const ARMInstrInfo *TII = 1684 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1685 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1686 i != e; 1687 ++i, ++realArgIdx) { 1688 CCValAssign &VA = ArgLocs[i]; 1689 EVT RegVT = VA.getLocVT(); 1690 SDValue Arg = OutVals[realArgIdx]; 1691 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1692 if (VA.getLocInfo() == CCValAssign::Indirect) 1693 return false; 1694 if (VA.needsCustom()) { 1695 // f64 and vector types are split into multiple registers or 1696 // register/stack-slot combinations. The types will not match 1697 // the registers; give up on memory f64 refs until we figure 1698 // out what to do about this.
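        // The checks below therefore just require every piece of the split
        // value to be in registers: one extra location for f64, and three
        // extra locations for v2f64 (which is lowered as two f64 halves).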
1699 if (!VA.isRegLoc()) 1700 return false; 1701 if (!ArgLocs[++i].isRegLoc()) 1702 return false; 1703 if (RegVT == MVT::v2f64) { 1704 if (!ArgLocs[++i].isRegLoc()) 1705 return false; 1706 if (!ArgLocs[++i].isRegLoc()) 1707 return false; 1708 } 1709 } else if (!VA.isRegLoc()) { 1710 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1711 MFI, MRI, TII)) 1712 return false; 1713 } 1714 } 1715 } 1716 } 1717 1718 return true; 1719} 1720 1721SDValue 1722ARMTargetLowering::LowerReturn(SDValue Chain, 1723 CallingConv::ID CallConv, bool isVarArg, 1724 const SmallVectorImpl<ISD::OutputArg> &Outs, 1725 const SmallVectorImpl<SDValue> &OutVals, 1726 DebugLoc dl, SelectionDAG &DAG) const { 1727 1728 // CCValAssign - represent the assignment of the return value to a location. 1729 SmallVector<CCValAssign, 16> RVLocs; 1730 1731 // CCState - Info about the registers and stack slots. 1732 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1733 *DAG.getContext()); 1734 1735 // Analyze outgoing return values. 1736 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1737 isVarArg)); 1738 1739 // If this is the first return lowered for this function, add 1740 // the regs to the liveout set for the function. 1741 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1742 for (unsigned i = 0; i != RVLocs.size(); ++i) 1743 if (RVLocs[i].isRegLoc()) 1744 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1745 } 1746 1747 SDValue Flag; 1748 1749 // Copy the result values into the output registers. 1750 for (unsigned i = 0, realRVLocIdx = 0; 1751 i != RVLocs.size(); 1752 ++i, ++realRVLocIdx) { 1753 CCValAssign &VA = RVLocs[i]; 1754 assert(VA.isRegLoc() && "Can only return in registers!"); 1755 1756 SDValue Arg = OutVals[realRVLocIdx]; 1757 1758 switch (VA.getLocInfo()) { 1759 default: llvm_unreachable("Unknown loc info!"); 1760 case CCValAssign::Full: break; 1761 case CCValAssign::BCvt: 1762 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1763 break; 1764 } 1765 1766 if (VA.needsCustom()) { 1767 if (VA.getLocVT() == MVT::v2f64) { 1768 // Extract the first half and return it in two registers. 1769 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1770 DAG.getConstant(0, MVT::i32)); 1771 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1772 DAG.getVTList(MVT::i32, MVT::i32), Half); 1773 1774 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1775 Flag = Chain.getValue(1); 1776 VA = RVLocs[++i]; // skip ahead to next loc 1777 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1778 HalfGPRs.getValue(1), Flag); 1779 Flag = Chain.getValue(1); 1780 VA = RVLocs[++i]; // skip ahead to next loc 1781 1782 // Extract the 2nd half and fall through to handle it as an f64 value. 1783 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1784 DAG.getConstant(1, MVT::i32)); 1785 } 1786 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1787 // available. 1788 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1789 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1790 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1791 Flag = Chain.getValue(1); 1792 VA = RVLocs[++i]; // skip ahead to next loc 1793 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1794 Flag); 1795 } else 1796 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1797 1798 // Guarantee that all emitted copies are 1799 // stuck together, avoiding something bad. 
1800 Flag = Chain.getValue(1); 1801 } 1802 1803 SDValue result; 1804 if (Flag.getNode()) 1805 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1806 else // Return Void 1807 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1808 1809 return result; 1810} 1811 1812bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1813 if (N->getNumValues() != 1) 1814 return false; 1815 if (!N->hasNUsesOfValue(1, 0)) 1816 return false; 1817 1818 unsigned NumCopies = 0; 1819 SDNode* Copies[2]; 1820 SDNode *Use = *N->use_begin(); 1821 if (Use->getOpcode() == ISD::CopyToReg) { 1822 Copies[NumCopies++] = Use; 1823 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1824 // f64 returned in a pair of GPRs. 1825 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1826 UI != UE; ++UI) { 1827 if (UI->getOpcode() != ISD::CopyToReg) 1828 return false; 1829 Copies[UI.getUse().getResNo()] = *UI; 1830 ++NumCopies; 1831 } 1832 } else if (Use->getOpcode() == ISD::BITCAST) { 1833 // f32 returned in a single GPR. 1834 if (!Use->hasNUsesOfValue(1, 0)) 1835 return false; 1836 Use = *Use->use_begin(); 1837 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1838 return false; 1839 Copies[NumCopies++] = Use; 1840 } else { 1841 return false; 1842 } 1843 1844 if (NumCopies != 1 && NumCopies != 2) 1845 return false; 1846 1847 bool HasRet = false; 1848 for (unsigned i = 0; i < NumCopies; ++i) { 1849 SDNode *Copy = Copies[i]; 1850 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1851 UI != UE; ++UI) { 1852 if (UI->getOpcode() == ISD::CopyToReg) { 1853 SDNode *Use = *UI; 1854 if (Use == Copies[0] || Use == Copies[1]) 1855 continue; 1856 return false; 1857 } 1858 if (UI->getOpcode() != ARMISD::RET_FLAG) 1859 return false; 1860 HasRet = true; 1861 } 1862 } 1863 1864 return HasRet; 1865} 1866 1867bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1868 if (!EnableARMTailCalls) 1869 return false; 1870 1871 if (!CI->isTailCall()) 1872 return false; 1873 1874 return !Subtarget->isThumb1Only(); 1875} 1876 1877// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1878// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1879// one of the above mentioned nodes. It has to be wrapped because otherwise 1880// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1881// be used to form addressing mode. These wrapped nodes will be selected 1882// into MOVi. 
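// For example (illustrative), a plain ConstantPool node becomes
//   (ARMISD::Wrapper (TargetConstantPool <cp#N>))
// and it is the wrapper that instruction selection folds into the actual
// address materialization.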
1883static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1884 EVT PtrVT = Op.getValueType(); 1885 // FIXME there is no actual debug info here 1886 DebugLoc dl = Op.getDebugLoc(); 1887 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1888 SDValue Res; 1889 if (CP->isMachineConstantPoolEntry()) 1890 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1891 CP->getAlignment()); 1892 else 1893 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1894 CP->getAlignment()); 1895 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1896} 1897 1898unsigned ARMTargetLowering::getJumpTableEncoding() const { 1899 return MachineJumpTableInfo::EK_Inline; 1900} 1901 1902SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1903 SelectionDAG &DAG) const { 1904 MachineFunction &MF = DAG.getMachineFunction(); 1905 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1906 unsigned ARMPCLabelIndex = 0; 1907 DebugLoc DL = Op.getDebugLoc(); 1908 EVT PtrVT = getPointerTy(); 1909 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1910 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1911 SDValue CPAddr; 1912 if (RelocM == Reloc::Static) { 1913 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1914 } else { 1915 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 1916 ARMPCLabelIndex = AFI->createPICLabelUId(); 1917 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1918 ARMCP::CPBlockAddress, 1919 PCAdj); 1920 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1921 } 1922 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1923 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1924 MachinePointerInfo::getConstantPool(), 1925 false, false, 0); 1926 if (RelocM == Reloc::Static) 1927 return Result; 1928 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1929 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1930} 1931 1932// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1933SDValue 1934ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1935 SelectionDAG &DAG) const { 1936 DebugLoc dl = GA->getDebugLoc(); 1937 EVT PtrVT = getPointerTy(); 1938 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1939 MachineFunction &MF = DAG.getMachineFunction(); 1940 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1941 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1942 ARMConstantPoolValue *CPV = 1943 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1944 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1945 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1946 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1947 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1948 MachinePointerInfo::getConstantPool(), 1949 false, false, 0); 1950 SDValue Chain = Argument.getValue(1); 1951 1952 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1953 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1954 1955 // call __tls_get_addr. 1956 ArgListTy Args; 1957 ArgListEntry Entry; 1958 Entry.Node = Argument; 1959 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1960 Args.push_back(Entry); 1961 // FIXME: is there useful debug info available here? 
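  // Roughly, the sequence assembled here is (sketch):
  //   arg    = pic_add(load(Wrapper <TLSGD constant-pool entry>), pic-label)
  //   result = call i32 @__tls_get_addr(arg)   ; emitted via LowerCallTo below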
1962 std::pair<SDValue, SDValue> CallResult = 1963 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1964 false, false, false, false, 1965 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1966 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1967 return CallResult.first; 1968} 1969 1970// Lower ISD::GlobalTLSAddress using the "initial exec" or 1971// "local exec" model. 1972SDValue 1973ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1974 SelectionDAG &DAG) const { 1975 const GlobalValue *GV = GA->getGlobal(); 1976 DebugLoc dl = GA->getDebugLoc(); 1977 SDValue Offset; 1978 SDValue Chain = DAG.getEntryNode(); 1979 EVT PtrVT = getPointerTy(); 1980 // Get the Thread Pointer 1981 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1982 1983 if (GV->isDeclaration()) { 1984 MachineFunction &MF = DAG.getMachineFunction(); 1985 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1986 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1987 // Initial exec model. 1988 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1989 ARMConstantPoolValue *CPV = 1990 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1991 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1992 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1993 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1994 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1995 MachinePointerInfo::getConstantPool(), 1996 false, false, 0); 1997 Chain = Offset.getValue(1); 1998 1999 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2000 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2001 2002 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2003 MachinePointerInfo::getConstantPool(), 2004 false, false, 0); 2005 } else { 2006 // local exec model 2007 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 2008 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2009 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2010 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2011 MachinePointerInfo::getConstantPool(), 2012 false, false, 0); 2013 } 2014 2015 // The address of the thread local variable is the add of the thread 2016 // pointer with the offset of the variable. 2017 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2018} 2019 2020SDValue 2021ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2022 // TODO: implement the "local dynamic" model 2023 assert(Subtarget->isTargetELF() && 2024 "TLS not implemented for non-ELF targets"); 2025 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2026 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2027 // otherwise use the "Local Exec" TLS Model 2028 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2029 return LowerToTLSGeneralDynamicModel(GA, DAG); 2030 else 2031 return LowerToTLSExecModels(GA, DAG); 2032} 2033 2034SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2035 SelectionDAG &DAG) const { 2036 EVT PtrVT = getPointerTy(); 2037 DebugLoc dl = Op.getDebugLoc(); 2038 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2039 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2040 if (RelocM == Reloc::PIC_) { 2041 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2042 ARMConstantPoolValue *CPV = 2043 new ARMConstantPoolValue(GV, UseGOTOFF ? 
ARMCP::GOTOFF : ARMCP::GOT); 2044 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2045 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2046 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2047 CPAddr, 2048 MachinePointerInfo::getConstantPool(), 2049 false, false, 0); 2050 SDValue Chain = Result.getValue(1); 2051 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2052 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2053 if (!UseGOTOFF) 2054 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2055 MachinePointerInfo::getGOT(), false, false, 0); 2056 return Result; 2057 } 2058 2059 // If we have T2 ops, we can materialize the address directly via movt/movw 2060 // pair. This is always cheaper. 2061 if (Subtarget->useMovt()) { 2062 ++NumMovwMovt; 2063 // FIXME: Once remat is capable of dealing with instructions with register 2064 // operands, expand this into two nodes. 2065 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2066 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2067 } else { 2068 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2069 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2070 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2071 MachinePointerInfo::getConstantPool(), 2072 false, false, 0); 2073 } 2074} 2075 2076SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2077 SelectionDAG &DAG) const { 2078 EVT PtrVT = getPointerTy(); 2079 DebugLoc dl = Op.getDebugLoc(); 2080 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2081 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2082 MachineFunction &MF = DAG.getMachineFunction(); 2083 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2084 2085 // FIXME: Enable this for static codegen when tool issues are fixed. 2086 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2087 ++NumMovwMovt; 2088 // FIXME: Once remat is capable of dealing with instructions with register 2089 // operands, expand this into two nodes. 2090 if (RelocM == Reloc::Static) 2091 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2092 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2093 2094 unsigned Wrapper = (RelocM == Reloc::PIC_) 2095 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2096 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2097 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2098 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2099 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2100 MachinePointerInfo::getGOT(), false, false, 0); 2101 return Result; 2102 } 2103 2104 unsigned ARMPCLabelIndex = 0; 2105 SDValue CPAddr; 2106 if (RelocM == Reloc::Static) { 2107 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2108 } else { 2109 ARMPCLabelIndex = AFI->createPICLabelUId(); 2110 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 
0 : (Subtarget->isThumb()?4:8); 2111 ARMConstantPoolValue *CPV = 2112 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 2113 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2114 } 2115 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2116 2117 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2118 MachinePointerInfo::getConstantPool(), 2119 false, false, 0); 2120 SDValue Chain = Result.getValue(1); 2121 2122 if (RelocM == Reloc::PIC_) { 2123 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2124 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2125 } 2126 2127 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2128 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2129 false, false, 0); 2130 2131 return Result; 2132} 2133 2134SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2135 SelectionDAG &DAG) const { 2136 assert(Subtarget->isTargetELF() && 2137 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2138 MachineFunction &MF = DAG.getMachineFunction(); 2139 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2140 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2141 EVT PtrVT = getPointerTy(); 2142 DebugLoc dl = Op.getDebugLoc(); 2143 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2144 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2145 "_GLOBAL_OFFSET_TABLE_", 2146 ARMPCLabelIndex, PCAdj); 2147 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2148 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2149 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2150 MachinePointerInfo::getConstantPool(), 2151 false, false, 0); 2152 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2153 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2154} 2155 2156SDValue 2157ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2158 const { 2159 DebugLoc dl = Op.getDebugLoc(); 2160 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2161 Op.getOperand(0), Op.getOperand(1)); 2162} 2163 2164SDValue 2165ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2166 DebugLoc dl = Op.getDebugLoc(); 2167 SDValue Val = DAG.getConstant(0, MVT::i32); 2168 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2169 Op.getOperand(1), Val); 2170} 2171 2172SDValue 2173ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2174 DebugLoc dl = Op.getDebugLoc(); 2175 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2176 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2177} 2178 2179SDValue 2180ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2181 const ARMSubtarget *Subtarget) const { 2182 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2183 DebugLoc dl = Op.getDebugLoc(); 2184 switch (IntNo) { 2185 default: return SDValue(); // Don't custom lower most intrinsics. 
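  // Handled cases below: the thread-pointer intrinsic maps directly to an
  // ARMISD::THREAD_POINTER node, eh.sjlj.lsda builds a (possibly PIC-adjusted)
  // constant-pool load of the function's LSDA, and the NEON vmull intrinsics
  // are re-expressed as VMULLs / VMULLu nodes.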
2186 case Intrinsic::arm_thread_pointer: { 2187 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2188 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2189 } 2190 case Intrinsic::eh_sjlj_lsda: { 2191 MachineFunction &MF = DAG.getMachineFunction(); 2192 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2193 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2194 EVT PtrVT = getPointerTy(); 2195 DebugLoc dl = Op.getDebugLoc(); 2196 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2197 SDValue CPAddr; 2198 unsigned PCAdj = (RelocM != Reloc::PIC_) 2199 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2200 ARMConstantPoolValue *CPV = 2201 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2202 ARMCP::CPLSDA, PCAdj); 2203 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2204 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2205 SDValue Result = 2206 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2207 MachinePointerInfo::getConstantPool(), 2208 false, false, 0); 2209 2210 if (RelocM == Reloc::PIC_) { 2211 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2212 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2213 } 2214 return Result; 2215 } 2216 case Intrinsic::arm_neon_vmulls: 2217 case Intrinsic::arm_neon_vmullu: { 2218 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2219 ? ARMISD::VMULLs : ARMISD::VMULLu; 2220 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2221 Op.getOperand(1), Op.getOperand(2)); 2222 } 2223 } 2224} 2225 2226static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2227 const ARMSubtarget *Subtarget) { 2228 DebugLoc dl = Op.getDebugLoc(); 2229 if (!Subtarget->hasDataBarrier()) { 2230 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2231 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2232 // here. 2233 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2234 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2235 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2236 DAG.getConstant(0, MVT::i32)); 2237 } 2238 2239 SDValue Op5 = Op.getOperand(5); 2240 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2241 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2242 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2243 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2244 2245 ARM_MB::MemBOpt DMBOpt; 2246 if (isDeviceBarrier) 2247 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2248 else 2249 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2250 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2251 DAG.getConstant(DMBOpt, MVT::i32)); 2252} 2253 2254static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2255 const ARMSubtarget *Subtarget) { 2256 // ARM pre v5TE and Thumb1 does not have preload instructions. 2257 if (!(Subtarget->isThumb2() || 2258 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2259 // Just preserve the chain. 2260 return Op.getOperand(0); 2261 2262 DebugLoc dl = Op.getDebugLoc(); 2263 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2264 if (!isRead && 2265 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2266 // ARMv7 with MP extension has PLDW. 2267 return Op.getOperand(0); 2268 2269 if (Subtarget->isThumb()) 2270 // Invert the bits. 2271 isRead = ~isRead & 1; 2272 unsigned isData = Subtarget->isThumb() ? 
0 : 1; 2273 2274 // Currently there is no intrinsic that matches pli. 2275 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2276 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2277 DAG.getConstant(isData, MVT::i32)); 2278} 2279 2280static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2281 MachineFunction &MF = DAG.getMachineFunction(); 2282 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2283 2284 // vastart just stores the address of the VarArgsFrameIndex slot into the 2285 // memory location argument. 2286 DebugLoc dl = Op.getDebugLoc(); 2287 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2288 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2289 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2290 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2291 MachinePointerInfo(SV), false, false, 0); 2292} 2293 2294SDValue 2295ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2296 SDValue &Root, SelectionDAG &DAG, 2297 DebugLoc dl) const { 2298 MachineFunction &MF = DAG.getMachineFunction(); 2299 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2300 2301 TargetRegisterClass *RC; 2302 if (AFI->isThumb1OnlyFunction()) 2303 RC = ARM::tGPRRegisterClass; 2304 else 2305 RC = ARM::GPRRegisterClass; 2306 2307 // Transform the arguments stored in physical registers into virtual ones. 2308 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2309 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2310 2311 SDValue ArgValue2; 2312 if (NextVA.isMemLoc()) { 2313 MachineFrameInfo *MFI = MF.getFrameInfo(); 2314 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2315 2316 // Create load node to retrieve arguments from the stack. 2317 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2318 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2319 MachinePointerInfo::getFixedStack(FI), 2320 false, false, 0); 2321 } else { 2322 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2323 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2324 } 2325 2326 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2327} 2328 2329void 2330ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, 2331 unsigned &VARegSize, unsigned &VARegSaveSize) 2332 const { 2333 unsigned NumGPRs; 2334 if (CCInfo.isFirstByValRegValid()) 2335 NumGPRs = ARM::R4 - CCInfo.getFirstByValReg(); 2336 else { 2337 unsigned int firstUnalloced; 2338 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs, 2339 sizeof(GPRArgRegs) / 2340 sizeof(GPRArgRegs[0])); 2341 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0; 2342 } 2343 2344 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2345 VARegSize = NumGPRs * 4; 2346 VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2347} 2348 2349// The remaining GPRs hold either the beginning of variable-argument 2350// data, or the beginning of an aggregate passed by value (usually 2351// byval). Either way, we allocate stack slots adjacent to the data 2352// provided by our caller, and store the unallocated registers there. 2353// If this is a variadic function, the va_list pointer will begin with 2354// these values; otherwise, this reassembles a (byval) structure that 2355// was split between registers and memory.
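// Worked example (illustrative): for "void f(int a, ...)" the named argument
// occupies R0, leaving R1-R3 unallocated; computeRegArea then reports
// VARegSize == 12, and the loop below stores those three registers into the
// fixed object so va_arg can walk register- and stack-passed varargs
// uniformly.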
2356void 2357ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2358 DebugLoc dl, SDValue &Chain, 2359 unsigned ArgOffset) const { 2360 MachineFunction &MF = DAG.getMachineFunction(); 2361 MachineFrameInfo *MFI = MF.getFrameInfo(); 2362 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2363 unsigned firstRegToSaveIndex; 2364 if (CCInfo.isFirstByValRegValid()) 2365 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2366 else { 2367 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2368 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2369 } 2370 2371 unsigned VARegSize, VARegSaveSize; 2372 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2373 if (VARegSaveSize) { 2374 // If this function is vararg, store any remaining integer argument regs 2375 // to their spots on the stack so that they may be loaded by deferencing 2376 // the result of va_next. 2377 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2378 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2379 ArgOffset + VARegSaveSize 2380 - VARegSize, 2381 false)); 2382 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2383 getPointerTy()); 2384 2385 SmallVector<SDValue, 4> MemOps; 2386 for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { 2387 TargetRegisterClass *RC; 2388 if (AFI->isThumb1OnlyFunction()) 2389 RC = ARM::tGPRRegisterClass; 2390 else 2391 RC = ARM::GPRRegisterClass; 2392 2393 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2394 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2395 SDValue Store = 2396 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2397 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2398 false, false, 0); 2399 MemOps.push_back(Store); 2400 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2401 DAG.getConstant(4, getPointerTy())); 2402 } 2403 if (!MemOps.empty()) 2404 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2405 &MemOps[0], MemOps.size()); 2406 } else 2407 // This will point to the next argument passed via stack. 2408 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2409} 2410 2411SDValue 2412ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2413 CallingConv::ID CallConv, bool isVarArg, 2414 const SmallVectorImpl<ISD::InputArg> 2415 &Ins, 2416 DebugLoc dl, SelectionDAG &DAG, 2417 SmallVectorImpl<SDValue> &InVals) 2418 const { 2419 MachineFunction &MF = DAG.getMachineFunction(); 2420 MachineFrameInfo *MFI = MF.getFrameInfo(); 2421 2422 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2423 2424 // Assign locations to all of the incoming arguments. 2425 SmallVector<CCValAssign, 16> ArgLocs; 2426 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2427 *DAG.getContext()); 2428 CCInfo.setCallOrPrologue(Prologue); 2429 CCInfo.AnalyzeFormalArguments(Ins, 2430 CCAssignFnForNode(CallConv, /* Return*/ false, 2431 isVarArg)); 2432 2433 SmallVector<SDValue, 16> ArgValues; 2434 int lastInsIndex = -1; 2435 2436 SDValue ArgValue; 2437 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2438 CCValAssign &VA = ArgLocs[i]; 2439 2440 // Arguments stored in registers. 2441 if (VA.isRegLoc()) { 2442 EVT RegVT = VA.getLocVT(); 2443 2444 if (VA.needsCustom()) { 2445 // f64 and vector types are split up into multiple registers or 2446 // combinations of registers and stack slots. 
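        // For example (illustrative), an f64 assigned to a GPR pair such as
        // R0/R1 is rebuilt below by GetF64FormalArgument as a single VMOVDRR,
        // and a v2f64 is reassembled from two such f64 halves with
        // INSERT_VECTOR_ELT.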
2447 if (VA.getLocVT() == MVT::v2f64) { 2448 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2449 Chain, DAG, dl); 2450 VA = ArgLocs[++i]; // skip ahead to next loc 2451 SDValue ArgValue2; 2452 if (VA.isMemLoc()) { 2453 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2454 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2455 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2456 MachinePointerInfo::getFixedStack(FI), 2457 false, false, 0); 2458 } else { 2459 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2460 Chain, DAG, dl); 2461 } 2462 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2463 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2464 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2465 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2466 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2467 } else 2468 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2469 2470 } else { 2471 TargetRegisterClass *RC; 2472 2473 if (RegVT == MVT::f32) 2474 RC = ARM::SPRRegisterClass; 2475 else if (RegVT == MVT::f64) 2476 RC = ARM::DPRRegisterClass; 2477 else if (RegVT == MVT::v2f64) 2478 RC = ARM::QPRRegisterClass; 2479 else if (RegVT == MVT::i32) 2480 RC = (AFI->isThumb1OnlyFunction() ? 2481 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2482 else 2483 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2484 2485 // Transform the arguments in physical registers into virtual ones. 2486 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2487 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2488 } 2489 2490 // If this is an 8 or 16-bit value, it is really passed promoted 2491 // to 32 bits. Insert an assert[sz]ext to capture this, then 2492 // truncate to the right size. 2493 switch (VA.getLocInfo()) { 2494 default: llvm_unreachable("Unknown loc info!"); 2495 case CCValAssign::Full: break; 2496 case CCValAssign::BCvt: 2497 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2498 break; 2499 case CCValAssign::SExt: 2500 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2501 DAG.getValueType(VA.getValVT())); 2502 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2503 break; 2504 case CCValAssign::ZExt: 2505 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2506 DAG.getValueType(VA.getValVT())); 2507 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2508 break; 2509 } 2510 2511 InVals.push_back(ArgValue); 2512 2513 } else { // VA.isRegLoc() 2514 2515 // sanity check 2516 assert(VA.isMemLoc()); 2517 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2518 2519 int index = ArgLocs[i].getValNo(); 2520 2521 // Some Ins[] entries become multiple ArgLoc[] entries. 2522 // Process them only once. 2523 if (index != lastInsIndex) 2524 { 2525 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2526 // FIXME: For now, all byval parameter objects are marked mutable. 2527 // This can be changed with more analysis. 2528 // In case of tail call optimization mark all arguments mutable. 2529 // Since they could be overwritten by lowering of arguments in case of 2530 // a tail call. 2531 if (Flags.isByVal()) { 2532 unsigned VARegSize, VARegSaveSize; 2533 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2534 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2535 unsigned Bytes = Flags.getByValSize() - VARegSize; 2536 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
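          // (Illustrative: a 16-byte byval whose first 8 bytes arrived in
          // R2/R3 leaves Bytes == 8 here, so the fixed object created below
          // covers only the part of the aggregate the caller passed in
          // memory.)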
2537 int FI = MFI->CreateFixedObject(Bytes, 2538 VA.getLocMemOffset(), false); 2539 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2540 } else { 2541 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2542 VA.getLocMemOffset(), true); 2543 2544 // Create load nodes to retrieve arguments from the stack. 2545 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2546 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2547 MachinePointerInfo::getFixedStack(FI), 2548 false, false, 0)); 2549 } 2550 lastInsIndex = index; 2551 } 2552 } 2553 } 2554 2555 // varargs 2556 if (isVarArg) 2557 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2558 2559 return Chain; 2560} 2561 2562/// isFloatingPointZero - Return true if this is +0.0. 2563static bool isFloatingPointZero(SDValue Op) { 2564 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2565 return CFP->getValueAPF().isPosZero(); 2566 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2567 // Maybe this has already been legalized into the constant pool? 2568 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2569 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2570 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2571 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2572 return CFP->getValueAPF().isPosZero(); 2573 } 2574 } 2575 return false; 2576} 2577 2578/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2579/// the given operands. 2580SDValue 2581ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2582 SDValue &ARMcc, SelectionDAG &DAG, 2583 DebugLoc dl) const { 2584 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2585 unsigned C = RHSC->getZExtValue(); 2586 if (!isLegalICmpImmediate(C)) { 2587 // Constant does not fit, try adjusting it by one? 2588 switch (CC) { 2589 default: break; 2590 case ISD::SETLT: 2591 case ISD::SETGE: 2592 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2593 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2594 RHS = DAG.getConstant(C-1, MVT::i32); 2595 } 2596 break; 2597 case ISD::SETULT: 2598 case ISD::SETUGE: 2599 if (C != 0 && isLegalICmpImmediate(C-1)) { 2600 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2601 RHS = DAG.getConstant(C-1, MVT::i32); 2602 } 2603 break; 2604 case ISD::SETLE: 2605 case ISD::SETGT: 2606 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2607 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2608 RHS = DAG.getConstant(C+1, MVT::i32); 2609 } 2610 break; 2611 case ISD::SETULE: 2612 case ISD::SETUGT: 2613 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2614 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2615 RHS = DAG.getConstant(C+1, MVT::i32); 2616 } 2617 break; 2618 } 2619 } 2620 } 2621 2622 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2623 ARMISD::NodeType CompareType; 2624 switch (CondCode) { 2625 default: 2626 CompareType = ARMISD::CMP; 2627 break; 2628 case ARMCC::EQ: 2629 case ARMCC::NE: 2630 // Uses only Z Flag 2631 CompareType = ARMISD::CMPZ; 2632 break; 2633 } 2634 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2635 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2636} 2637 2638/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
2639SDValue 2640ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2641 DebugLoc dl) const { 2642 SDValue Cmp; 2643 if (!isFloatingPointZero(RHS)) 2644 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2645 else 2646 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2647 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2648} 2649 2650/// duplicateCmp - Glue values can have only one use, so this function 2651/// duplicates a comparison node. 2652SDValue 2653ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2654 unsigned Opc = Cmp.getOpcode(); 2655 DebugLoc DL = Cmp.getDebugLoc(); 2656 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2657 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2658 2659 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2660 Cmp = Cmp.getOperand(0); 2661 Opc = Cmp.getOpcode(); 2662 if (Opc == ARMISD::CMPFP) 2663 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2664 else { 2665 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2666 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2667 } 2668 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2669} 2670 2671SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2672 SDValue Cond = Op.getOperand(0); 2673 SDValue SelectTrue = Op.getOperand(1); 2674 SDValue SelectFalse = Op.getOperand(2); 2675 DebugLoc dl = Op.getDebugLoc(); 2676 2677 // Convert: 2678 // 2679 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2680 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2681 // 2682 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2683 const ConstantSDNode *CMOVTrue = 2684 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2685 const ConstantSDNode *CMOVFalse = 2686 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2687 2688 if (CMOVTrue && CMOVFalse) { 2689 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2690 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2691 2692 SDValue True; 2693 SDValue False; 2694 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2695 True = SelectTrue; 2696 False = SelectFalse; 2697 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2698 True = SelectFalse; 2699 False = SelectTrue; 2700 } 2701 2702 if (True.getNode() && False.getNode()) { 2703 EVT VT = Op.getValueType(); 2704 SDValue ARMcc = Cond.getOperand(2); 2705 SDValue CCR = Cond.getOperand(3); 2706 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2707 assert(True.getValueType() == VT); 2708 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2709 } 2710 } 2711 } 2712 2713 return DAG.getSelectCC(dl, Cond, 2714 DAG.getConstant(0, Cond.getValueType()), 2715 SelectTrue, SelectFalse, ISD::SETNE); 2716} 2717 2718SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2719 EVT VT = Op.getValueType(); 2720 SDValue LHS = Op.getOperand(0); 2721 SDValue RHS = Op.getOperand(1); 2722 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2723 SDValue TrueVal = Op.getOperand(2); 2724 SDValue FalseVal = Op.getOperand(3); 2725 DebugLoc dl = Op.getDebugLoc(); 2726 2727 if (LHS.getValueType() == MVT::i32) { 2728 SDValue ARMcc; 2729 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2730 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2731 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2732 } 2733 2734 ARMCC::CondCodes CondCode, CondCode2; 2735 FPCCToARMCC(CC, 
CondCode, CondCode2); 2736 2737 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2738 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2739 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2740 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2741 ARMcc, CCR, Cmp); 2742 if (CondCode2 != ARMCC::AL) { 2743 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2744 // FIXME: Needs another CMP because flag can have but one use. 2745 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2746 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2747 Result, TrueVal, ARMcc2, CCR, Cmp2); 2748 } 2749 return Result; 2750} 2751 2752/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2753/// to morph to an integer compare sequence. 2754static bool canChangeToInt(SDValue Op, bool &SeenZero, 2755 const ARMSubtarget *Subtarget) { 2756 SDNode *N = Op.getNode(); 2757 if (!N->hasOneUse()) 2758 // Otherwise it requires moving the value from fp to integer registers. 2759 return false; 2760 if (!N->getNumValues()) 2761 return false; 2762 EVT VT = Op.getValueType(); 2763 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2764 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2765 // vmrs are very slow, e.g. cortex-a8. 2766 return false; 2767 2768 if (isFloatingPointZero(Op)) { 2769 SeenZero = true; 2770 return true; 2771 } 2772 return ISD::isNormalLoad(N); 2773} 2774 2775static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2776 if (isFloatingPointZero(Op)) 2777 return DAG.getConstant(0, MVT::i32); 2778 2779 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2780 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2781 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2782 Ld->isVolatile(), Ld->isNonTemporal(), 2783 Ld->getAlignment()); 2784 2785 llvm_unreachable("Unknown VFP cmp argument!"); 2786} 2787 2788static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2789 SDValue &RetVal1, SDValue &RetVal2) { 2790 if (isFloatingPointZero(Op)) { 2791 RetVal1 = DAG.getConstant(0, MVT::i32); 2792 RetVal2 = DAG.getConstant(0, MVT::i32); 2793 return; 2794 } 2795 2796 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2797 SDValue Ptr = Ld->getBasePtr(); 2798 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2799 Ld->getChain(), Ptr, 2800 Ld->getPointerInfo(), 2801 Ld->isVolatile(), Ld->isNonTemporal(), 2802 Ld->getAlignment()); 2803 2804 EVT PtrType = Ptr.getValueType(); 2805 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2806 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2807 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2808 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2809 Ld->getChain(), NewPtr, 2810 Ld->getPointerInfo().getWithOffset(4), 2811 Ld->isVolatile(), Ld->isNonTemporal(), 2812 NewAlign); 2813 return; 2814 } 2815 2816 llvm_unreachable("Unknown VFP cmp argument!"); 2817} 2818 2819/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2820/// f32 and even f64 comparisons to integer ones. 
2821SDValue 2822ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2823 SDValue Chain = Op.getOperand(0); 2824 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2825 SDValue LHS = Op.getOperand(2); 2826 SDValue RHS = Op.getOperand(3); 2827 SDValue Dest = Op.getOperand(4); 2828 DebugLoc dl = Op.getDebugLoc(); 2829 2830 bool SeenZero = false; 2831 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2832 canChangeToInt(RHS, SeenZero, Subtarget) && 2833 // If one of the operand is zero, it's safe to ignore the NaN case since 2834 // we only care about equality comparisons. 2835 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2836 // If unsafe fp math optimization is enabled and there are no other uses of 2837 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2838 // to an integer comparison. 2839 if (CC == ISD::SETOEQ) 2840 CC = ISD::SETEQ; 2841 else if (CC == ISD::SETUNE) 2842 CC = ISD::SETNE; 2843 2844 SDValue ARMcc; 2845 if (LHS.getValueType() == MVT::f32) { 2846 LHS = bitcastf32Toi32(LHS, DAG); 2847 RHS = bitcastf32Toi32(RHS, DAG); 2848 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2849 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2850 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2851 Chain, Dest, ARMcc, CCR, Cmp); 2852 } 2853 2854 SDValue LHS1, LHS2; 2855 SDValue RHS1, RHS2; 2856 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2857 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2858 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2859 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2860 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2861 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2862 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2863 } 2864 2865 return SDValue(); 2866} 2867 2868SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2869 SDValue Chain = Op.getOperand(0); 2870 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2871 SDValue LHS = Op.getOperand(2); 2872 SDValue RHS = Op.getOperand(3); 2873 SDValue Dest = Op.getOperand(4); 2874 DebugLoc dl = Op.getDebugLoc(); 2875 2876 if (LHS.getValueType() == MVT::i32) { 2877 SDValue ARMcc; 2878 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2879 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2880 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2881 Chain, Dest, ARMcc, CCR, Cmp); 2882 } 2883 2884 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2885 2886 if (UnsafeFPMath && 2887 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2888 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2889 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2890 if (Result.getNode()) 2891 return Result; 2892 } 2893 2894 ARMCC::CondCodes CondCode, CondCode2; 2895 FPCCToARMCC(CC, CondCode, CondCode2); 2896 2897 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2898 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2899 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2900 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2901 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2902 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2903 if (CondCode2 != ARMCC::AL) { 2904 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2905 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2906 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2907 } 2908 return Res; 2909} 2910 2911SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2912 SDValue 
Chain = Op.getOperand(0); 2913 SDValue Table = Op.getOperand(1); 2914 SDValue Index = Op.getOperand(2); 2915 DebugLoc dl = Op.getDebugLoc(); 2916 2917 EVT PTy = getPointerTy(); 2918 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2919 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2920 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2921 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2922 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2923 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2924 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2925 if (Subtarget->isThumb2()) { 2926 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2927 // which does another jump to the destination. This also makes it easier 2928 // to translate it to TBB / TBH later. 2929 // FIXME: This might not work if the function is extremely large. 2930 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2931 Addr, Op.getOperand(2), JTI, UId); 2932 } 2933 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2934 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2935 MachinePointerInfo::getJumpTable(), 2936 false, false, 0); 2937 Chain = Addr.getValue(1); 2938 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2939 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2940 } else { 2941 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2942 MachinePointerInfo::getJumpTable(), false, false, 0); 2943 Chain = Addr.getValue(1); 2944 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2945 } 2946} 2947 2948static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2949 DebugLoc dl = Op.getDebugLoc(); 2950 unsigned Opc; 2951 2952 switch (Op.getOpcode()) { 2953 default: 2954 assert(0 && "Invalid opcode!"); 2955 case ISD::FP_TO_SINT: 2956 Opc = ARMISD::FTOSI; 2957 break; 2958 case ISD::FP_TO_UINT: 2959 Opc = ARMISD::FTOUI; 2960 break; 2961 } 2962 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2963 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2964} 2965 2966static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2967 EVT VT = Op.getValueType(); 2968 DebugLoc dl = Op.getDebugLoc(); 2969 2970 EVT OperandVT = Op.getOperand(0).getValueType(); 2971 assert(OperandVT == MVT::v4i16 && "Invalid type for custom lowering!"); 2972 if (VT != MVT::v4f32) 2973 return DAG.UnrollVectorOp(Op.getNode()); 2974 2975 unsigned CastOpc; 2976 unsigned Opc; 2977 switch (Op.getOpcode()) { 2978 default: 2979 assert(0 && "Invalid opcode!"); 2980 case ISD::SINT_TO_FP: 2981 CastOpc = ISD::SIGN_EXTEND; 2982 Opc = ISD::SINT_TO_FP; 2983 break; 2984 case ISD::UINT_TO_FP: 2985 CastOpc = ISD::ZERO_EXTEND; 2986 Opc = ISD::UINT_TO_FP; 2987 break; 2988 } 2989 2990 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 2991 return DAG.getNode(Opc, dl, VT, Op); 2992} 2993 2994static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2995 EVT VT = Op.getValueType(); 2996 if (VT.isVector()) 2997 return LowerVectorINT_TO_FP(Op, DAG); 2998 2999 DebugLoc dl = Op.getDebugLoc(); 3000 unsigned Opc; 3001 3002 switch (Op.getOpcode()) { 3003 default: 3004 assert(0 && "Invalid opcode!"); 3005 case ISD::SINT_TO_FP: 3006 Opc = ARMISD::SITOF; 3007 break; 3008 case ISD::UINT_TO_FP: 3009 Opc = ARMISD::UITOF; 3010 break; 3011 } 3012 3013 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3014 return DAG.getNode(Opc, dl, VT, Op); 3015} 3016 3017SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3018 // Implement fcopysign with a fabs and a conditional fneg. 3019 SDValue Tmp0 = Op.getOperand(0); 3020 SDValue Tmp1 = Op.getOperand(1); 3021 DebugLoc dl = Op.getDebugLoc(); 3022 EVT VT = Op.getValueType(); 3023 EVT SrcVT = Tmp1.getValueType(); 3024 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3025 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3026 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3027 3028 if (UseNEON) { 3029 // Use VBSL to copy the sign bit. 3030 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3031 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3032 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3033 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3034 if (VT == MVT::f64) 3035 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3036 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3037 DAG.getConstant(32, MVT::i32)); 3038 else /*if (VT == MVT::f32)*/ 3039 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3040 if (SrcVT == MVT::f32) { 3041 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3042 if (VT == MVT::f64) 3043 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3044 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3045 DAG.getConstant(32, MVT::i32)); 3046 } else if (VT == MVT::f32) 3047 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3048 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3049 DAG.getConstant(32, MVT::i32)); 3050 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3051 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3052 3053 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3054 MVT::i32); 3055 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3056 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3057 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3058 3059 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3060 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3061 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3062 if (VT == MVT::f32) { 3063 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3064 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3065 DAG.getConstant(0, MVT::i32)); 3066 } else { 3067 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3068 } 3069 3070 return Res; 3071 } 3072 3073 // Bitcast operand 1 to i32. 3074 if (SrcVT == MVT::f64) 3075 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3076 &Tmp1, 1).getValue(1); 3077 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3078 3079 // Or in the signbit with integer operations. 3080 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3081 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3082 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3083 if (VT == MVT::f32) { 3084 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3085 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3086 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3087 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3088 } 3089 3090 // f64: Or the high part with signbit and then combine two parts. 
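  // VMOVRRD splits the f64 into Lo/Hi i32 halves; the sign bit lives in the
  // Hi half, so mask it there and rebuild the f64 with VMOVDRR.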
3091 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3092 &Tmp0, 1); 3093 SDValue Lo = Tmp0.getValue(0); 3094 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3095 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3096 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3097} 3098 3099SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3100 MachineFunction &MF = DAG.getMachineFunction(); 3101 MachineFrameInfo *MFI = MF.getFrameInfo(); 3102 MFI->setReturnAddressIsTaken(true); 3103 3104 EVT VT = Op.getValueType(); 3105 DebugLoc dl = Op.getDebugLoc(); 3106 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3107 if (Depth) { 3108 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3109 SDValue Offset = DAG.getConstant(4, MVT::i32); 3110 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3111 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3112 MachinePointerInfo(), false, false, 0); 3113 } 3114 3115 // Return LR, which contains the return address. Mark it an implicit live-in. 3116 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3117 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3118} 3119 3120SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3121 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3122 MFI->setFrameAddressIsTaken(true); 3123 3124 EVT VT = Op.getValueType(); 3125 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3126 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3127 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3128 ? ARM::R7 : ARM::R11; 3129 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3130 while (Depth--) 3131 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3132 MachinePointerInfo(), 3133 false, false, 0); 3134 return FrameAddr; 3135} 3136 3137/// ExpandBITCAST - If the target supports VFP, this function is called to 3138/// expand a bit convert where either the source or destination type is i64 to 3139/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3140/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3141/// vectors), since the legalizer won't know what to do with that. 3142static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3143 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3144 DebugLoc dl = N->getDebugLoc(); 3145 SDValue Op = N->getOperand(0); 3146 3147 // This function is only supposed to be called for i64 types, either as the 3148 // source or destination of the bit convert. 3149 EVT SrcVT = Op.getValueType(); 3150 EVT DstVT = N->getValueType(0); 3151 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3152 "ExpandBITCAST called for non-i64 type"); 3153 3154 // Turn i64->f64 into VMOVDRR. 3155 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3156 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3157 DAG.getConstant(0, MVT::i32)); 3158 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3159 DAG.getConstant(1, MVT::i32)); 3160 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3161 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3162 } 3163 3164 // Turn f64->i64 into VMOVRRD. 3165 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3166 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3167 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3168 // Merge the pieces into a single i64 value. 
3169 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3170 } 3171 3172 return SDValue(); 3173} 3174 3175/// getZeroVector - Returns a vector of specified type with all zero elements. 3176/// Zero vectors are used to represent vector negation and in those cases 3177/// will be implemented with the NEON VNEG instruction. However, VNEG does 3178/// not support i64 elements, so sometimes the zero vectors will need to be 3179/// explicitly constructed. Regardless, use a canonical VMOV to create the 3180/// zero vector. 3181static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3182 assert(VT.isVector() && "Expected a vector type"); 3183 // The canonical modified immediate encoding of a zero vector is....0! 3184 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3185 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3186 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3187 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3188} 3189 3190/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3191/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3192SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3193 SelectionDAG &DAG) const { 3194 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3195 EVT VT = Op.getValueType(); 3196 unsigned VTBits = VT.getSizeInBits(); 3197 DebugLoc dl = Op.getDebugLoc(); 3198 SDValue ShOpLo = Op.getOperand(0); 3199 SDValue ShOpHi = Op.getOperand(1); 3200 SDValue ShAmt = Op.getOperand(2); 3201 SDValue ARMcc; 3202 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3203 3204 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3205 3206 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3207 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3208 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3209 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3210 DAG.getConstant(VTBits, MVT::i32)); 3211 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3212 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3213 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3214 3215 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3216 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3217 ARMcc, DAG, dl); 3218 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3219 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3220 CCR, Cmp); 3221 3222 SDValue Ops[2] = { Lo, Hi }; 3223 return DAG.getMergeValues(Ops, 2, dl); 3224} 3225 3226/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3227/// i32 values and take a 2 x i32 value to shift plus a shift amount. 
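/// The lowering builds both possible Hi values and selects between them with
/// a CMOV keyed on (ShAmt - VTBits) >= 0:
///   Hi = (ShOpHi << ShAmt) | (ShOpLo >> (VTBits - ShAmt))  when ShAmt < VTBits
///   Hi = ShOpLo << (ShAmt - VTBits)                        otherwise
/// while Lo is always ShOpLo << ShAmt.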
3228SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3229 SelectionDAG &DAG) const { 3230 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3231 EVT VT = Op.getValueType(); 3232 unsigned VTBits = VT.getSizeInBits(); 3233 DebugLoc dl = Op.getDebugLoc(); 3234 SDValue ShOpLo = Op.getOperand(0); 3235 SDValue ShOpHi = Op.getOperand(1); 3236 SDValue ShAmt = Op.getOperand(2); 3237 SDValue ARMcc; 3238 3239 assert(Op.getOpcode() == ISD::SHL_PARTS); 3240 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3241 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3242 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3243 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3244 DAG.getConstant(VTBits, MVT::i32)); 3245 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3246 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3247 3248 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3249 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3250 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3251 ARMcc, DAG, dl); 3252 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3253 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3254 CCR, Cmp); 3255 3256 SDValue Ops[2] = { Lo, Hi }; 3257 return DAG.getMergeValues(Ops, 2, dl); 3258} 3259 3260SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3261 SelectionDAG &DAG) const { 3262 // The rounding mode is in bits 23:22 of the FPSCR. 3263 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3264 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3265 // so that the shift + and get folded into a bitfield extract. 3266 DebugLoc dl = Op.getDebugLoc(); 3267 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3268 DAG.getConstant(Intrinsic::arm_get_fpscr, 3269 MVT::i32)); 3270 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3271 DAG.getConstant(1U << 22, MVT::i32)); 3272 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3273 DAG.getConstant(22, MVT::i32)); 3274 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3275 DAG.getConstant(3, MVT::i32)); 3276} 3277 3278static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3279 const ARMSubtarget *ST) { 3280 EVT VT = N->getValueType(0); 3281 DebugLoc dl = N->getDebugLoc(); 3282 3283 if (!ST->hasV6T2Ops()) 3284 return SDValue(); 3285 3286 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3287 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3288} 3289 3290static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3291 const ARMSubtarget *ST) { 3292 EVT VT = N->getValueType(0); 3293 DebugLoc dl = N->getDebugLoc(); 3294 3295 if (!VT.isVector()) 3296 return SDValue(); 3297 3298 // Lower vector shifts on NEON to use VSHL. 3299 assert(ST->hasNEON() && "unexpected vector shift"); 3300 3301 // Left shifts translate directly to the vshiftu intrinsic. 3302 if (N->getOpcode() == ISD::SHL) 3303 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3304 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3305 N->getOperand(0), N->getOperand(1)); 3306 3307 assert((N->getOpcode() == ISD::SRA || 3308 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3309 3310 // NEON uses the same intrinsics for both left and right shifts. For 3311 // right shifts, the shift amounts are negative, so negate the vector of 3312 // shift amounts. 
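  // For example, an SRL of <4 x i32> by <1, 2, 3, 4> becomes a vshiftu call
  // with shift amounts <-1, -2, -3, -4>.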
3313 EVT ShiftVT = N->getOperand(1).getValueType(); 3314 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3315 getZeroVector(ShiftVT, DAG, dl), 3316 N->getOperand(1)); 3317 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3318 Intrinsic::arm_neon_vshifts : 3319 Intrinsic::arm_neon_vshiftu); 3320 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3321 DAG.getConstant(vshiftInt, MVT::i32), 3322 N->getOperand(0), NegatedCount); 3323} 3324 3325static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3326 const ARMSubtarget *ST) { 3327 EVT VT = N->getValueType(0); 3328 DebugLoc dl = N->getDebugLoc(); 3329 3330 // We can get here for a node like i32 = ISD::SHL i32, i64 3331 if (VT != MVT::i64) 3332 return SDValue(); 3333 3334 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3335 "Unknown shift to lower!"); 3336 3337 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3338 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3339 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3340 return SDValue(); 3341 3342 // If we are in thumb mode, we don't have RRX. 3343 if (ST->isThumb1Only()) return SDValue(); 3344 3345 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3346 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3347 DAG.getConstant(0, MVT::i32)); 3348 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3349 DAG.getConstant(1, MVT::i32)); 3350 3351 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3352 // captures the result into a carry flag. 3353 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3354 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3355 3356 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3357 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3358 3359 // Merge the pieces into a single i64 value. 3360 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3361} 3362 3363static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3364 SDValue TmpOp0, TmpOp1; 3365 bool Invert = false; 3366 bool Swap = false; 3367 unsigned Opc = 0; 3368 3369 SDValue Op0 = Op.getOperand(0); 3370 SDValue Op1 = Op.getOperand(1); 3371 SDValue CC = Op.getOperand(2); 3372 EVT VT = Op.getValueType(); 3373 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3374 DebugLoc dl = Op.getDebugLoc(); 3375 3376 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3377 switch (SetCCOpcode) { 3378 default: llvm_unreachable("Illegal FP comparison"); break; 3379 case ISD::SETUNE: 3380 case ISD::SETNE: Invert = true; // Fallthrough 3381 case ISD::SETOEQ: 3382 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3383 case ISD::SETOLT: 3384 case ISD::SETLT: Swap = true; // Fallthrough 3385 case ISD::SETOGT: 3386 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3387 case ISD::SETOLE: 3388 case ISD::SETLE: Swap = true; // Fallthrough 3389 case ISD::SETOGE: 3390 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3391 case ISD::SETUGE: Swap = true; // Fallthrough 3392 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3393 case ISD::SETUGT: Swap = true; // Fallthrough 3394 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3395 case ISD::SETUEQ: Invert = true; // Fallthrough 3396 case ISD::SETONE: 3397 // Expand this to (OLT | OGT). 
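    // ONE is true exactly when the operands are ordered and unequal, and a
    // NEON VCGT yields false whenever either input is a NaN, so
    // VCGT(Op1, Op0) | VCGT(Op0, Op1) gives the correct result.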
3398 TmpOp0 = Op0; 3399 TmpOp1 = Op1; 3400 Opc = ISD::OR; 3401 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3402 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3403 break; 3404 case ISD::SETUO: Invert = true; // Fallthrough 3405 case ISD::SETO: 3406 // Expand this to (OLT | OGE). 3407 TmpOp0 = Op0; 3408 TmpOp1 = Op1; 3409 Opc = ISD::OR; 3410 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3411 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3412 break; 3413 } 3414 } else { 3415 // Integer comparisons. 3416 switch (SetCCOpcode) { 3417 default: llvm_unreachable("Illegal integer comparison"); break; 3418 case ISD::SETNE: Invert = true; 3419 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3420 case ISD::SETLT: Swap = true; 3421 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3422 case ISD::SETLE: Swap = true; 3423 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3424 case ISD::SETULT: Swap = true; 3425 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3426 case ISD::SETULE: Swap = true; 3427 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3428 } 3429 3430 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3431 if (Opc == ARMISD::VCEQ) { 3432 3433 SDValue AndOp; 3434 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3435 AndOp = Op0; 3436 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3437 AndOp = Op1; 3438 3439 // Ignore bitconvert. 3440 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3441 AndOp = AndOp.getOperand(0); 3442 3443 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3444 Opc = ARMISD::VTST; 3445 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3446 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3447 Invert = !Invert; 3448 } 3449 } 3450 } 3451 3452 if (Swap) 3453 std::swap(Op0, Op1); 3454 3455 // If one of the operands is a constant vector zero, attempt to fold the 3456 // comparison to a specialized compare-against-zero form. 3457 SDValue SingleOp; 3458 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3459 SingleOp = Op0; 3460 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3461 if (Opc == ARMISD::VCGE) 3462 Opc = ARMISD::VCLEZ; 3463 else if (Opc == ARMISD::VCGT) 3464 Opc = ARMISD::VCLTZ; 3465 SingleOp = Op1; 3466 } 3467 3468 SDValue Result; 3469 if (SingleOp.getNode()) { 3470 switch (Opc) { 3471 case ARMISD::VCEQ: 3472 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3473 case ARMISD::VCGE: 3474 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3475 case ARMISD::VCLEZ: 3476 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3477 case ARMISD::VCGT: 3478 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3479 case ARMISD::VCLTZ: 3480 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3481 default: 3482 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3483 } 3484 } else { 3485 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3486 } 3487 3488 if (Invert) 3489 Result = DAG.getNOT(dl, Result, VT); 3490 3491 return Result; 3492} 3493 3494/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3495/// valid vector constant for a NEON instruction with a "modified immediate" 3496/// operand (e.g., VMOV). If so, return the encoded value. 
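/// The encoding packs the Op/Cmode bits together with an 8-bit immediate via
/// ARM_AM::createNEONModImm; e.g. a 32-bit splat of 0x00nn0000 is returned
/// with Cmode = 010x and Imm = 0xnn, and VT is set to the matching vector type.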
3497static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3498 unsigned SplatBitSize, SelectionDAG &DAG, 3499 EVT &VT, bool is128Bits, NEONModImmType type) { 3500 unsigned OpCmode, Imm; 3501 3502 // SplatBitSize is set to the smallest size that splats the vector, so a 3503 // zero vector will always have SplatBitSize == 8. However, NEON modified 3504 // immediate instructions others than VMOV do not support the 8-bit encoding 3505 // of a zero vector, and the default encoding of zero is supposed to be the 3506 // 32-bit version. 3507 if (SplatBits == 0) 3508 SplatBitSize = 32; 3509 3510 switch (SplatBitSize) { 3511 case 8: 3512 if (type != VMOVModImm) 3513 return SDValue(); 3514 // Any 1-byte value is OK. Op=0, Cmode=1110. 3515 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3516 OpCmode = 0xe; 3517 Imm = SplatBits; 3518 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3519 break; 3520 3521 case 16: 3522 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3523 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3524 if ((SplatBits & ~0xff) == 0) { 3525 // Value = 0x00nn: Op=x, Cmode=100x. 3526 OpCmode = 0x8; 3527 Imm = SplatBits; 3528 break; 3529 } 3530 if ((SplatBits & ~0xff00) == 0) { 3531 // Value = 0xnn00: Op=x, Cmode=101x. 3532 OpCmode = 0xa; 3533 Imm = SplatBits >> 8; 3534 break; 3535 } 3536 return SDValue(); 3537 3538 case 32: 3539 // NEON's 32-bit VMOV supports splat values where: 3540 // * only one byte is nonzero, or 3541 // * the least significant byte is 0xff and the second byte is nonzero, or 3542 // * the least significant 2 bytes are 0xff and the third is nonzero. 3543 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3544 if ((SplatBits & ~0xff) == 0) { 3545 // Value = 0x000000nn: Op=x, Cmode=000x. 3546 OpCmode = 0; 3547 Imm = SplatBits; 3548 break; 3549 } 3550 if ((SplatBits & ~0xff00) == 0) { 3551 // Value = 0x0000nn00: Op=x, Cmode=001x. 3552 OpCmode = 0x2; 3553 Imm = SplatBits >> 8; 3554 break; 3555 } 3556 if ((SplatBits & ~0xff0000) == 0) { 3557 // Value = 0x00nn0000: Op=x, Cmode=010x. 3558 OpCmode = 0x4; 3559 Imm = SplatBits >> 16; 3560 break; 3561 } 3562 if ((SplatBits & ~0xff000000) == 0) { 3563 // Value = 0xnn000000: Op=x, Cmode=011x. 3564 OpCmode = 0x6; 3565 Imm = SplatBits >> 24; 3566 break; 3567 } 3568 3569 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3570 if (type == OtherModImm) return SDValue(); 3571 3572 if ((SplatBits & ~0xffff) == 0 && 3573 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3574 // Value = 0x0000nnff: Op=x, Cmode=1100. 3575 OpCmode = 0xc; 3576 Imm = SplatBits >> 8; 3577 SplatBits |= 0xff; 3578 break; 3579 } 3580 3581 if ((SplatBits & ~0xffffff) == 0 && 3582 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3583 // Value = 0x00nnffff: Op=x, Cmode=1101. 3584 OpCmode = 0xd; 3585 Imm = SplatBits >> 16; 3586 SplatBits |= 0xffff; 3587 break; 3588 } 3589 3590 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3591 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3592 // VMOV.I32. A (very) minor optimization would be to replicate the value 3593 // and fall through here to test for a valid 64-bit splat. But, then the 3594 // caller would also need to check and handle the change in size. 3595 return SDValue(); 3596 3597 case 64: { 3598 if (type != VMOVModImm) 3599 return SDValue(); 3600 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
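    // Each bit of the 8-bit immediate selects one byte, least significant byte
    // first; e.g. a splat of 0x00ff00ff00ff00ff is encoded with Imm = 0x55.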
3601 uint64_t BitMask = 0xff; 3602 uint64_t Val = 0; 3603 unsigned ImmMask = 1; 3604 Imm = 0; 3605 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3606 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3607 Val |= BitMask; 3608 Imm |= ImmMask; 3609 } else if ((SplatBits & BitMask) != 0) { 3610 return SDValue(); 3611 } 3612 BitMask <<= 8; 3613 ImmMask <<= 1; 3614 } 3615 // Op=1, Cmode=1110. 3616 OpCmode = 0x1e; 3617 SplatBits = Val; 3618 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3619 break; 3620 } 3621 3622 default: 3623 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3624 return SDValue(); 3625 } 3626 3627 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3628 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3629} 3630 3631static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3632 bool &ReverseVEXT, unsigned &Imm) { 3633 unsigned NumElts = VT.getVectorNumElements(); 3634 ReverseVEXT = false; 3635 3636 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3637 if (M[0] < 0) 3638 return false; 3639 3640 Imm = M[0]; 3641 3642 // If this is a VEXT shuffle, the immediate value is the index of the first 3643 // element. The other shuffle indices must be the successive elements after 3644 // the first one. 3645 unsigned ExpectedElt = Imm; 3646 for (unsigned i = 1; i < NumElts; ++i) { 3647 // Increment the expected index. If it wraps around, it may still be 3648 // a VEXT but the source vectors must be swapped. 3649 ExpectedElt += 1; 3650 if (ExpectedElt == NumElts * 2) { 3651 ExpectedElt = 0; 3652 ReverseVEXT = true; 3653 } 3654 3655 if (M[i] < 0) continue; // ignore UNDEF indices 3656 if (ExpectedElt != static_cast<unsigned>(M[i])) 3657 return false; 3658 } 3659 3660 // Adjust the index value if the source operands will be swapped. 3661 if (ReverseVEXT) 3662 Imm -= NumElts; 3663 3664 return true; 3665} 3666 3667/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3668/// instruction with the specified blocksize. (The order of the elements 3669/// within each block of the vector is reversed.) 3670static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3671 unsigned BlockSize) { 3672 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3673 "Only possible block sizes for VREV are: 16, 32, 64"); 3674 3675 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3676 if (EltSz == 64) 3677 return false; 3678 3679 unsigned NumElts = VT.getVectorNumElements(); 3680 unsigned BlockElts = M[0] + 1; 3681 // If the first shuffle index is UNDEF, be optimistic. 3682 if (M[0] < 0) 3683 BlockElts = BlockSize / EltSz; 3684 3685 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3686 return false; 3687 3688 for (unsigned i = 0; i < NumElts; ++i) { 3689 if (M[i] < 0) continue; // ignore UNDEF indices 3690 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3691 return false; 3692 } 3693 3694 return true; 3695} 3696 3697static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3698 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3699 // range, then 0 is placed into the resulting vector. So pretty much any mask 3700 // of 8 elements can work here. 
3701 return VT == MVT::v8i8 && M.size() == 8; 3702} 3703 3704static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3705 unsigned &WhichResult) { 3706 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3707 if (EltSz == 64) 3708 return false; 3709 3710 unsigned NumElts = VT.getVectorNumElements(); 3711 WhichResult = (M[0] == 0 ? 0 : 1); 3712 for (unsigned i = 0; i < NumElts; i += 2) { 3713 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3714 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3715 return false; 3716 } 3717 return true; 3718} 3719 3720/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3721/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3722/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3723static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3724 unsigned &WhichResult) { 3725 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3726 if (EltSz == 64) 3727 return false; 3728 3729 unsigned NumElts = VT.getVectorNumElements(); 3730 WhichResult = (M[0] == 0 ? 0 : 1); 3731 for (unsigned i = 0; i < NumElts; i += 2) { 3732 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3733 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3734 return false; 3735 } 3736 return true; 3737} 3738 3739static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3740 unsigned &WhichResult) { 3741 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3742 if (EltSz == 64) 3743 return false; 3744 3745 unsigned NumElts = VT.getVectorNumElements(); 3746 WhichResult = (M[0] == 0 ? 0 : 1); 3747 for (unsigned i = 0; i != NumElts; ++i) { 3748 if (M[i] < 0) continue; // ignore UNDEF indices 3749 if ((unsigned) M[i] != 2 * i + WhichResult) 3750 return false; 3751 } 3752 3753 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3754 if (VT.is64BitVector() && EltSz == 32) 3755 return false; 3756 3757 return true; 3758} 3759 3760/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3761/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3762/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3763static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3764 unsigned &WhichResult) { 3765 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3766 if (EltSz == 64) 3767 return false; 3768 3769 unsigned Half = VT.getVectorNumElements() / 2; 3770 WhichResult = (M[0] == 0 ? 0 : 1); 3771 for (unsigned j = 0; j != 2; ++j) { 3772 unsigned Idx = WhichResult; 3773 for (unsigned i = 0; i != Half; ++i) { 3774 int MIdx = M[i + j * Half]; 3775 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3776 return false; 3777 Idx += 2; 3778 } 3779 } 3780 3781 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3782 if (VT.is64BitVector() && EltSz == 32) 3783 return false; 3784 3785 return true; 3786} 3787 3788static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3789 unsigned &WhichResult) { 3790 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3791 if (EltSz == 64) 3792 return false; 3793 3794 unsigned NumElts = VT.getVectorNumElements(); 3795 WhichResult = (M[0] == 0 ? 0 : 1); 3796 unsigned Idx = WhichResult * NumElts / 2; 3797 for (unsigned i = 0; i != NumElts; i += 2) { 3798 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3799 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3800 return false; 3801 Idx += 1; 3802 } 3803 3804 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
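  // Rejecting the mask here lets it be matched by the VTRN checks instead.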
3805 if (VT.is64BitVector() && EltSz == 32) 3806 return false; 3807 3808 return true; 3809} 3810 3811/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3812/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3813/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3814static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3815 unsigned &WhichResult) { 3816 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3817 if (EltSz == 64) 3818 return false; 3819 3820 unsigned NumElts = VT.getVectorNumElements(); 3821 WhichResult = (M[0] == 0 ? 0 : 1); 3822 unsigned Idx = WhichResult * NumElts / 2; 3823 for (unsigned i = 0; i != NumElts; i += 2) { 3824 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3825 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3826 return false; 3827 Idx += 1; 3828 } 3829 3830 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3831 if (VT.is64BitVector() && EltSz == 32) 3832 return false; 3833 3834 return true; 3835} 3836 3837// If N is an integer constant that can be moved into a register in one 3838// instruction, return an SDValue of such a constant (will become a MOV 3839// instruction). Otherwise return null. 3840static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3841 const ARMSubtarget *ST, DebugLoc dl) { 3842 uint64_t Val; 3843 if (!isa<ConstantSDNode>(N)) 3844 return SDValue(); 3845 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3846 3847 if (ST->isThumb1Only()) { 3848 if (Val <= 255 || ~Val <= 255) 3849 return DAG.getConstant(Val, MVT::i32); 3850 } else { 3851 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3852 return DAG.getConstant(Val, MVT::i32); 3853 } 3854 return SDValue(); 3855} 3856 3857// If this is a case we can't handle, return null and let the default 3858// expansion code take care of it. 3859SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3860 const ARMSubtarget *ST) const { 3861 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3862 DebugLoc dl = Op.getDebugLoc(); 3863 EVT VT = Op.getValueType(); 3864 3865 APInt SplatBits, SplatUndef; 3866 unsigned SplatBitSize; 3867 bool HasAnyUndefs; 3868 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3869 if (SplatBitSize <= 64) { 3870 // Check if an immediate VMOV works. 3871 EVT VmovVT; 3872 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3873 SplatUndef.getZExtValue(), SplatBitSize, 3874 DAG, VmovVT, VT.is128BitVector(), 3875 VMOVModImm); 3876 if (Val.getNode()) { 3877 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3878 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3879 } 3880 3881 // Try an immediate VMVN. 3882 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3883 ((1LL << SplatBitSize) - 1)); 3884 Val = isNEONModifiedImm(NegatedImm, 3885 SplatUndef.getZExtValue(), SplatBitSize, 3886 DAG, VmovVT, VT.is128BitVector(), 3887 VMVNModImm); 3888 if (Val.getNode()) { 3889 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3890 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3891 } 3892 } 3893 } 3894 3895 // Scan through the operands to see if only one value is used. 
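  // Also note whether only element 0 is defined and whether every element is
  // a constant; those cases feed the SCALAR_TO_VECTOR, VDUP, and
  // constant-pool paths below.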
3896 unsigned NumElts = VT.getVectorNumElements(); 3897 bool isOnlyLowElement = true; 3898 bool usesOnlyOneValue = true; 3899 bool isConstant = true; 3900 SDValue Value; 3901 for (unsigned i = 0; i < NumElts; ++i) { 3902 SDValue V = Op.getOperand(i); 3903 if (V.getOpcode() == ISD::UNDEF) 3904 continue; 3905 if (i > 0) 3906 isOnlyLowElement = false; 3907 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3908 isConstant = false; 3909 3910 if (!Value.getNode()) 3911 Value = V; 3912 else if (V != Value) 3913 usesOnlyOneValue = false; 3914 } 3915 3916 if (!Value.getNode()) 3917 return DAG.getUNDEF(VT); 3918 3919 if (isOnlyLowElement) 3920 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3921 3922 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3923 3924 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3925 // i32 and try again. 3926 if (usesOnlyOneValue && EltSize <= 32) { 3927 if (!isConstant) 3928 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3929 if (VT.getVectorElementType().isFloatingPoint()) { 3930 SmallVector<SDValue, 8> Ops; 3931 for (unsigned i = 0; i < NumElts; ++i) 3932 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3933 Op.getOperand(i))); 3934 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3935 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3936 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3937 if (Val.getNode()) 3938 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3939 } 3940 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3941 if (Val.getNode()) 3942 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3943 } 3944 3945 // If all elements are constants and the case above didn't get hit, fall back 3946 // to the default expansion, which will generate a load from the constant 3947 // pool. 3948 if (isConstant) 3949 return SDValue(); 3950 3951 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 3952 if (NumElts >= 4) { 3953 SDValue shuffle = ReconstructShuffle(Op, DAG); 3954 if (shuffle != SDValue()) 3955 return shuffle; 3956 } 3957 3958 // Vectors with 32- or 64-bit elements can be built by directly assigning 3959 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3960 // will be legalized. 3961 if (EltSize >= 32) { 3962 // Do the expansion with floating-point types, since that is what the VFP 3963 // registers are defined to use, and since i64 is not legal. 3964 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3965 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3966 SmallVector<SDValue, 8> Ops; 3967 for (unsigned i = 0; i < NumElts; ++i) 3968 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3969 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3970 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3971 } 3972 3973 return SDValue(); 3974} 3975 3976// Gather data to see if the operation can be modelled as a 3977// shuffle in combination with VEXTs. 
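// For example, a v4i16 whose lanes are extracted from elements 1..4 of a
// single v8i16 source becomes a VEXT of that source's two halves followed by
// a (possibly trivial) 4-lane shuffle, rather than four separate extracts.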
3978SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 3979 SelectionDAG &DAG) const { 3980 DebugLoc dl = Op.getDebugLoc(); 3981 EVT VT = Op.getValueType(); 3982 unsigned NumElts = VT.getVectorNumElements(); 3983 3984 SmallVector<SDValue, 2> SourceVecs; 3985 SmallVector<unsigned, 2> MinElts; 3986 SmallVector<unsigned, 2> MaxElts; 3987 3988 for (unsigned i = 0; i < NumElts; ++i) { 3989 SDValue V = Op.getOperand(i); 3990 if (V.getOpcode() == ISD::UNDEF) 3991 continue; 3992 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 3993 // A shuffle can only come from building a vector from various 3994 // elements of other vectors. 3995 return SDValue(); 3996 } 3997 3998 // Record this extraction against the appropriate vector if possible... 3999 SDValue SourceVec = V.getOperand(0); 4000 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4001 bool FoundSource = false; 4002 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4003 if (SourceVecs[j] == SourceVec) { 4004 if (MinElts[j] > EltNo) 4005 MinElts[j] = EltNo; 4006 if (MaxElts[j] < EltNo) 4007 MaxElts[j] = EltNo; 4008 FoundSource = true; 4009 break; 4010 } 4011 } 4012 4013 // Or record a new source if not... 4014 if (!FoundSource) { 4015 SourceVecs.push_back(SourceVec); 4016 MinElts.push_back(EltNo); 4017 MaxElts.push_back(EltNo); 4018 } 4019 } 4020 4021 // Currently only do something sane when at most two source vectors 4022 // involved. 4023 if (SourceVecs.size() > 2) 4024 return SDValue(); 4025 4026 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4027 int VEXTOffsets[2] = {0, 0}; 4028 4029 // This loop extracts the usage patterns of the source vectors 4030 // and prepares appropriate SDValues for a shuffle if possible. 4031 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4032 if (SourceVecs[i].getValueType() == VT) { 4033 // No VEXT necessary 4034 ShuffleSrcs[i] = SourceVecs[i]; 4035 VEXTOffsets[i] = 0; 4036 continue; 4037 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4038 // It probably isn't worth padding out a smaller vector just to 4039 // break it down again in a shuffle. 4040 return SDValue(); 4041 } 4042 4043 // Since only 64-bit and 128-bit vectors are legal on ARM and 4044 // we've eliminated the other cases... 
4045 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4046 "unexpected vector sizes in ReconstructShuffle"); 4047 4048 if (MaxElts[i] - MinElts[i] >= NumElts) { 4049 // Span too large for a VEXT to cope 4050 return SDValue(); 4051 } 4052 4053 if (MinElts[i] >= NumElts) { 4054 // The extraction can just take the second half 4055 VEXTOffsets[i] = NumElts; 4056 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4057 SourceVecs[i], 4058 DAG.getIntPtrConstant(NumElts)); 4059 } else if (MaxElts[i] < NumElts) { 4060 // The extraction can just take the first half 4061 VEXTOffsets[i] = 0; 4062 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4063 SourceVecs[i], 4064 DAG.getIntPtrConstant(0)); 4065 } else { 4066 // An actual VEXT is needed 4067 VEXTOffsets[i] = MinElts[i]; 4068 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4069 SourceVecs[i], 4070 DAG.getIntPtrConstant(0)); 4071 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4072 SourceVecs[i], 4073 DAG.getIntPtrConstant(NumElts)); 4074 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4075 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4076 } 4077 } 4078 4079 SmallVector<int, 8> Mask; 4080 4081 for (unsigned i = 0; i < NumElts; ++i) { 4082 SDValue Entry = Op.getOperand(i); 4083 if (Entry.getOpcode() == ISD::UNDEF) { 4084 Mask.push_back(-1); 4085 continue; 4086 } 4087 4088 SDValue ExtractVec = Entry.getOperand(0); 4089 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4090 .getOperand(1))->getSExtValue(); 4091 if (ExtractVec == SourceVecs[0]) { 4092 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4093 } else { 4094 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4095 } 4096 } 4097 4098 // Final check before we try to produce nonsense... 4099 if (isShuffleMaskLegal(Mask, VT)) 4100 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4101 &Mask[0]); 4102 4103 return SDValue(); 4104} 4105 4106/// isShuffleMaskLegal - Targets can use this to indicate that they only 4107/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4108/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4109/// are assumed to be legal. 4110bool 4111ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4112 EVT VT) const { 4113 if (VT.getVectorNumElements() == 4 && 4114 (VT.is128BitVector() || VT.is64BitVector())) { 4115 unsigned PFIndexes[4]; 4116 for (unsigned i = 0; i != 4; ++i) { 4117 if (M[i] < 0) 4118 PFIndexes[i] = 8; 4119 else 4120 PFIndexes[i] = M[i]; 4121 } 4122 4123 // Compute the index in the perfect shuffle table. 
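    // Each lane index is one base-9 digit (0-7 pick a lane, 8 means undef),
    // so a 4-lane mask maps to a unique entry in the 9^4-entry table.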
4124 unsigned PFTableIndex = 4125 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4126 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4127 unsigned Cost = (PFEntry >> 30); 4128 4129 if (Cost <= 4) 4130 return true; 4131 } 4132 4133 bool ReverseVEXT; 4134 unsigned Imm, WhichResult; 4135 4136 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4137 return (EltSize >= 32 || 4138 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4139 isVREVMask(M, VT, 64) || 4140 isVREVMask(M, VT, 32) || 4141 isVREVMask(M, VT, 16) || 4142 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4143 isVTBLMask(M, VT) || 4144 isVTRNMask(M, VT, WhichResult) || 4145 isVUZPMask(M, VT, WhichResult) || 4146 isVZIPMask(M, VT, WhichResult) || 4147 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4148 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4149 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4150} 4151 4152/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4153/// the specified operations to build the shuffle. 4154static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4155 SDValue RHS, SelectionDAG &DAG, 4156 DebugLoc dl) { 4157 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4158 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4159 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4160 4161 enum { 4162 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4163 OP_VREV, 4164 OP_VDUP0, 4165 OP_VDUP1, 4166 OP_VDUP2, 4167 OP_VDUP3, 4168 OP_VEXT1, 4169 OP_VEXT2, 4170 OP_VEXT3, 4171 OP_VUZPL, // VUZP, left result 4172 OP_VUZPR, // VUZP, right result 4173 OP_VZIPL, // VZIP, left result 4174 OP_VZIPR, // VZIP, right result 4175 OP_VTRNL, // VTRN, left result 4176 OP_VTRNR // VTRN, right result 4177 }; 4178 4179 if (OpNum == OP_COPY) { 4180 if (LHSID == (1*9+2)*9+3) return LHS; 4181 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4182 return RHS; 4183 } 4184 4185 SDValue OpLHS, OpRHS; 4186 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4187 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4188 EVT VT = OpLHS.getValueType(); 4189 4190 switch (OpNum) { 4191 default: llvm_unreachable("Unknown shuffle opcode!"); 4192 case OP_VREV: 4193 // VREV divides the vector in half and swaps within the half. 
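    // Pick the VREV variant whose reversal block is twice the element size,
    // i.e. the one that swaps adjacent pairs of lanes.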
4194 if (VT.getVectorElementType() == MVT::i32 || 4195 VT.getVectorElementType() == MVT::f32) 4196 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4197 // vrev <4 x i16> -> VREV32 4198 if (VT.getVectorElementType() == MVT::i16) 4199 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4200 // vrev <4 x i8> -> VREV16 4201 assert(VT.getVectorElementType() == MVT::i8); 4202 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4203 case OP_VDUP0: 4204 case OP_VDUP1: 4205 case OP_VDUP2: 4206 case OP_VDUP3: 4207 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4208 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4209 case OP_VEXT1: 4210 case OP_VEXT2: 4211 case OP_VEXT3: 4212 return DAG.getNode(ARMISD::VEXT, dl, VT, 4213 OpLHS, OpRHS, 4214 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4215 case OP_VUZPL: 4216 case OP_VUZPR: 4217 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4218 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4219 case OP_VZIPL: 4220 case OP_VZIPR: 4221 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4222 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4223 case OP_VTRNL: 4224 case OP_VTRNR: 4225 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4226 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4227 } 4228} 4229 4230static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4231 SmallVectorImpl<int> &ShuffleMask, 4232 SelectionDAG &DAG) { 4233 // Check to see if we can use the VTBL instruction. 4234 SDValue V1 = Op.getOperand(0); 4235 SDValue V2 = Op.getOperand(1); 4236 DebugLoc DL = Op.getDebugLoc(); 4237 4238 SmallVector<SDValue, 8> VTBLMask; 4239 for (SmallVectorImpl<int>::iterator 4240 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4241 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4242 4243 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4244 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4245 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4246 &VTBLMask[0], 8)); 4247 4248 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4249 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4250 &VTBLMask[0], 8)); 4251} 4252 4253static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4254 SDValue V1 = Op.getOperand(0); 4255 SDValue V2 = Op.getOperand(1); 4256 DebugLoc dl = Op.getDebugLoc(); 4257 EVT VT = Op.getValueType(); 4258 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4259 SmallVector<int, 8> ShuffleMask; 4260 4261 // Convert shuffles that are directly supported on NEON to target-specific 4262 // DAG nodes, instead of keeping them as shuffles and matching them again 4263 // during code selection. This is more efficient and avoids the possibility 4264 // of inconsistencies between legalization and selection. 4265 // FIXME: floating-point vectors should be canonicalized to integer vectors 4266 // of the same time so that they get CSEd properly. 4267 SVN->getMask(ShuffleMask); 4268 4269 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4270 if (EltSize <= 32) { 4271 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4272 int Lane = SVN->getSplatIndex(); 4273 // If this is undef splat, generate it via "just" vdup, if possible. 
4274 if (Lane == -1) Lane = 0; 4275 4276 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4277 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4278 } 4279 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4280 DAG.getConstant(Lane, MVT::i32)); 4281 } 4282 4283 bool ReverseVEXT; 4284 unsigned Imm; 4285 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4286 if (ReverseVEXT) 4287 std::swap(V1, V2); 4288 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4289 DAG.getConstant(Imm, MVT::i32)); 4290 } 4291 4292 if (isVREVMask(ShuffleMask, VT, 64)) 4293 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4294 if (isVREVMask(ShuffleMask, VT, 32)) 4295 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4296 if (isVREVMask(ShuffleMask, VT, 16)) 4297 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4298 4299 // Check for Neon shuffles that modify both input vectors in place. 4300 // If both results are used, i.e., if there are two shuffles with the same 4301 // source operands and with masks corresponding to both results of one of 4302 // these operations, DAG memoization will ensure that a single node is 4303 // used for both shuffles. 4304 unsigned WhichResult; 4305 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4306 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4307 V1, V2).getValue(WhichResult); 4308 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4309 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4310 V1, V2).getValue(WhichResult); 4311 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4312 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4313 V1, V2).getValue(WhichResult); 4314 4315 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4316 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4317 V1, V1).getValue(WhichResult); 4318 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4319 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4320 V1, V1).getValue(WhichResult); 4321 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4322 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4323 V1, V1).getValue(WhichResult); 4324 } 4325 4326 // If the shuffle is not directly supported and it has 4 elements, use 4327 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4328 unsigned NumElts = VT.getVectorNumElements(); 4329 if (NumElts == 4) { 4330 unsigned PFIndexes[4]; 4331 for (unsigned i = 0; i != 4; ++i) { 4332 if (ShuffleMask[i] < 0) 4333 PFIndexes[i] = 8; 4334 else 4335 PFIndexes[i] = ShuffleMask[i]; 4336 } 4337 4338 // Compute the index in the perfect shuffle table. 4339 unsigned PFTableIndex = 4340 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4341 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4342 unsigned Cost = (PFEntry >> 30); 4343 4344 if (Cost <= 4) 4345 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4346 } 4347 4348 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4349 if (EltSize >= 32) { 4350 // Do the expansion with floating-point types, since that is what the VFP 4351 // registers are defined to use, and since i64 is not legal. 
4352 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4353 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4354 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4355 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4356 SmallVector<SDValue, 8> Ops; 4357 for (unsigned i = 0; i < NumElts; ++i) { 4358 if (ShuffleMask[i] < 0) 4359 Ops.push_back(DAG.getUNDEF(EltVT)); 4360 else 4361 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4362 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4363 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4364 MVT::i32))); 4365 } 4366 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4367 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4368 } 4369 4370 if (VT == MVT::v8i8) { 4371 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4372 if (NewOp.getNode()) 4373 return NewOp; 4374 } 4375 4376 return SDValue(); 4377} 4378 4379static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4380 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4381 SDValue Lane = Op.getOperand(1); 4382 if (!isa<ConstantSDNode>(Lane)) 4383 return SDValue(); 4384 4385 SDValue Vec = Op.getOperand(0); 4386 if (Op.getValueType() == MVT::i32 && 4387 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4388 DebugLoc dl = Op.getDebugLoc(); 4389 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4390 } 4391 4392 return Op; 4393} 4394 4395static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4396 // The only time a CONCAT_VECTORS operation can have legal types is when 4397 // two 64-bit vectors are concatenated to a 128-bit vector. 4398 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4399 "unexpected CONCAT_VECTORS"); 4400 DebugLoc dl = Op.getDebugLoc(); 4401 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4402 SDValue Op0 = Op.getOperand(0); 4403 SDValue Op1 = Op.getOperand(1); 4404 if (Op0.getOpcode() != ISD::UNDEF) 4405 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4406 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4407 DAG.getIntPtrConstant(0)); 4408 if (Op1.getOpcode() != ISD::UNDEF) 4409 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4410 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4411 DAG.getIntPtrConstant(1)); 4412 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4413} 4414 4415/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4416/// element has been zero/sign-extended, depending on the isSigned parameter, 4417/// from an integer type half its size. 4418static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4419 bool isSigned) { 4420 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4421 EVT VT = N->getValueType(0); 4422 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4423 SDNode *BVN = N->getOperand(0).getNode(); 4424 if (BVN->getValueType(0) != MVT::v4i32 || 4425 BVN->getOpcode() != ISD::BUILD_VECTOR) 4426 return false; 4427 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4428 unsigned HiElt = 1 - LoElt; 4429 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4430 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4431 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4432 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4433 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4434 return false; 4435 if (isSigned) { 4436 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4437 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4438 return true; 4439 } else { 4440 if (Hi0->isNullValue() && Hi1->isNullValue()) 4441 return true; 4442 } 4443 return false; 4444 } 4445 4446 if (N->getOpcode() != ISD::BUILD_VECTOR) 4447 return false; 4448 4449 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4450 SDNode *Elt = N->getOperand(i).getNode(); 4451 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4452 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4453 unsigned HalfSize = EltSize / 2; 4454 if (isSigned) { 4455 int64_t SExtVal = C->getSExtValue(); 4456 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4457 return false; 4458 } else { 4459 if ((C->getZExtValue() >> HalfSize) != 0) 4460 return false; 4461 } 4462 continue; 4463 } 4464 return false; 4465 } 4466 4467 return true; 4468} 4469 4470/// isSignExtended - Check if a node is a vector value that is sign-extended 4471/// or a constant BUILD_VECTOR with sign-extended elements. 4472static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4473 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4474 return true; 4475 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4476 return true; 4477 return false; 4478} 4479 4480/// isZeroExtended - Check if a node is a vector value that is zero-extended 4481/// or a constant BUILD_VECTOR with zero-extended elements. 4482static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4483 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4484 return true; 4485 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4486 return true; 4487 return false; 4488} 4489 4490/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4491/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4492static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4493 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4494 return N->getOperand(0); 4495 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4496 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4497 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4498 LD->isNonTemporal(), LD->getAlignment()); 4499 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4500 // have been legalized as a BITCAST from v4i32. 4501 if (N->getOpcode() == ISD::BITCAST) { 4502 SDNode *BVN = N->getOperand(0).getNode(); 4503 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4504 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4505 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4506 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4507 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4508 } 4509 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
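  // For example (hypothetical constants), a v4i32 BUILD_VECTOR whose elements
  // all fit in i16, say <1, -2, 3, -4>, is rebuilt below as a v4i16
  // BUILD_VECTOR with each APInt truncated from 32 to 16 bits, giving the
  // 64-bit operand that VMULL expects.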
4510 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4511 EVT VT = N->getValueType(0); 4512 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4513 unsigned NumElts = VT.getVectorNumElements(); 4514 MVT TruncVT = MVT::getIntegerVT(EltSize); 4515 SmallVector<SDValue, 8> Ops; 4516 for (unsigned i = 0; i != NumElts; ++i) { 4517 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4518 const APInt &CInt = C->getAPIntValue(); 4519 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4520 } 4521 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4522 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4523} 4524 4525static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4526 unsigned Opcode = N->getOpcode(); 4527 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4528 SDNode *N0 = N->getOperand(0).getNode(); 4529 SDNode *N1 = N->getOperand(1).getNode(); 4530 return N0->hasOneUse() && N1->hasOneUse() && 4531 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4532 } 4533 return false; 4534} 4535 4536static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4537 unsigned Opcode = N->getOpcode(); 4538 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4539 SDNode *N0 = N->getOperand(0).getNode(); 4540 SDNode *N1 = N->getOperand(1).getNode(); 4541 return N0->hasOneUse() && N1->hasOneUse() && 4542 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4543 } 4544 return false; 4545} 4546 4547static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4548 // Multiplications are only custom-lowered for 128-bit vectors so that 4549 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4550 EVT VT = Op.getValueType(); 4551 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4552 SDNode *N0 = Op.getOperand(0).getNode(); 4553 SDNode *N1 = Op.getOperand(1).getNode(); 4554 unsigned NewOpc = 0; 4555 bool isMLA = false; 4556 bool isN0SExt = isSignExtended(N0, DAG); 4557 bool isN1SExt = isSignExtended(N1, DAG); 4558 if (isN0SExt && isN1SExt) 4559 NewOpc = ARMISD::VMULLs; 4560 else { 4561 bool isN0ZExt = isZeroExtended(N0, DAG); 4562 bool isN1ZExt = isZeroExtended(N1, DAG); 4563 if (isN0ZExt && isN1ZExt) 4564 NewOpc = ARMISD::VMULLu; 4565 else if (isN1SExt || isN1ZExt) { 4566 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4567 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4568 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4569 NewOpc = ARMISD::VMULLs; 4570 isMLA = true; 4571 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4572 NewOpc = ARMISD::VMULLu; 4573 isMLA = true; 4574 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4575 std::swap(N0, N1); 4576 NewOpc = ARMISD::VMULLu; 4577 isMLA = true; 4578 } 4579 } 4580 4581 if (!NewOpc) { 4582 if (VT == MVT::v2i64) 4583 // Fall through to expand this. It is not legal. 4584 return SDValue(); 4585 else 4586 // Other vector multiplications are legal. 4587 return Op; 4588 } 4589 } 4590 4591 // Legalize to a VMULL instruction. 4592 DebugLoc DL = Op.getDebugLoc(); 4593 SDValue Op0; 4594 SDValue Op1 = SkipExtension(N1, DAG); 4595 if (!isMLA) { 4596 Op0 = SkipExtension(N0, DAG); 4597 assert(Op0.getValueType().is64BitVector() && 4598 Op1.getValueType().is64BitVector() && 4599 "unexpected types for extended operands to VMULL"); 4600 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4601 } 4602 4603 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4604 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4605 // vmull q0, d4, d6 4606 // vmlal q0, d5, d6 4607 // is faster than 4608 // vaddl q0, d4, d5 4609 // vmovl q1, d6 4610 // vmul q0, q0, q1 4611 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4612 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4613 EVT Op1VT = Op1.getValueType(); 4614 return DAG.getNode(N0->getOpcode(), DL, VT, 4615 DAG.getNode(NewOpc, DL, VT, 4616 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4617 DAG.getNode(NewOpc, DL, VT, 4618 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4619} 4620 4621static SDValue 4622LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4623 // Convert to float 4624 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4625 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4626 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4627 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4628 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4629 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4630 // Get reciprocal estimate. 4631 // float4 recip = vrecpeq_f32(yf); 4632 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4633 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4634 // Because char has a smaller range than uchar, we can actually get away 4635 // without any newton steps. This requires that we use a weird bias 4636 // of 0xb000, however (again, this has been exhaustively tested). 4637 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4638 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4639 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4640 Y = DAG.getConstant(0xb000, MVT::i32); 4641 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4642 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4643 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4644 // Convert back to short. 4645 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4646 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4647 return X; 4648} 4649 4650static SDValue 4651LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4652 SDValue N2; 4653 // Convert to float. 4654 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4655 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4656 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4657 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4658 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4659 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4660 4661 // Use reciprocal estimate and one refinement step. 4662 // float4 recip = vrecpeq_f32(yf); 4663 // recip *= vrecpsq_f32(yf, recip); 4664 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4665 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4666 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4667 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4668 N1, N2); 4669 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4670 // Because short has a smaller range than ushort, we can actually get away 4671 // with only a single newton step. This requires that we use a weird bias 4672 // of 89, however (again, this has been exhaustively tested). 
4673 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4674 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4675 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4676 N1 = DAG.getConstant(0x89, MVT::i32); 4677 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4678 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4679 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4680 // Convert back to integer and return. 4681 // return vmovn_s32(vcvt_s32_f32(result)); 4682 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4683 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4684 return N0; 4685} 4686 4687static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4688 EVT VT = Op.getValueType(); 4689 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4690 "unexpected type for custom-lowering ISD::SDIV"); 4691 4692 DebugLoc dl = Op.getDebugLoc(); 4693 SDValue N0 = Op.getOperand(0); 4694 SDValue N1 = Op.getOperand(1); 4695 SDValue N2, N3; 4696 4697 if (VT == MVT::v8i8) { 4698 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4699 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4700 4701 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4702 DAG.getIntPtrConstant(4)); 4703 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4704 DAG.getIntPtrConstant(4)); 4705 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4706 DAG.getIntPtrConstant(0)); 4707 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4708 DAG.getIntPtrConstant(0)); 4709 4710 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4711 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4712 4713 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4714 N0 = LowerCONCAT_VECTORS(N0, DAG); 4715 4716 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4717 return N0; 4718 } 4719 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4720} 4721 4722static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4723 EVT VT = Op.getValueType(); 4724 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4725 "unexpected type for custom-lowering ISD::UDIV"); 4726 4727 DebugLoc dl = Op.getDebugLoc(); 4728 SDValue N0 = Op.getOperand(0); 4729 SDValue N1 = Op.getOperand(1); 4730 SDValue N2, N3; 4731 4732 if (VT == MVT::v8i8) { 4733 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4734 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4735 4736 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4737 DAG.getIntPtrConstant(4)); 4738 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4739 DAG.getIntPtrConstant(4)); 4740 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4741 DAG.getIntPtrConstant(0)); 4742 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4743 DAG.getIntPtrConstant(0)); 4744 4745 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4746 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4747 4748 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4749 N0 = LowerCONCAT_VECTORS(N0, DAG); 4750 4751 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4752 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4753 N0); 4754 return N0; 4755 } 4756 4757 // v4i16 sdiv ... Convert to float. 
4758 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 4759 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 4760 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 4761 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 4762 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4763 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4764 4765 // Use reciprocal estimate and two refinement steps. 4766 // float4 recip = vrecpeq_f32(yf); 4767 // recip *= vrecpsq_f32(yf, recip); 4768 // recip *= vrecpsq_f32(yf, recip); 4769 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4770 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1); 4771 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4772 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4773 BN1, N2); 4774 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4775 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4776 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4777 BN1, N2); 4778 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4779 // Simply multiplying by the reciprocal estimate can leave us a few ulps 4780 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 4781 // and that it will never cause us to return an answer too large). 4782 // float4 result = as_float4(as_int4(xf*recip) + 2); 4783 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4784 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4785 N1 = DAG.getConstant(2, MVT::i32); 4786 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4787 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4788 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4789 // Convert back to integer and return. 4790 // return vmovn_u32(vcvt_s32_f32(result)); 4791 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4792 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4793 return N0; 4794} 4795 4796SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4797 switch (Op.getOpcode()) { 4798 default: llvm_unreachable("Don't know how to custom lower this!"); 4799 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4800 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4801 case ISD::GlobalAddress: 4802 return Subtarget->isTargetDarwin() ? 
LowerGlobalAddressDarwin(Op, DAG) : 4803 LowerGlobalAddressELF(Op, DAG); 4804 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4805 case ISD::SELECT: return LowerSELECT(Op, DAG); 4806 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4807 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4808 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4809 case ISD::VASTART: return LowerVASTART(Op, DAG); 4810 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4811 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4812 case ISD::SINT_TO_FP: 4813 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4814 case ISD::FP_TO_SINT: 4815 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4816 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4817 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4818 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4819 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4820 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4821 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4822 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4823 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4824 Subtarget); 4825 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4826 case ISD::SHL: 4827 case ISD::SRL: 4828 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4829 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4830 case ISD::SRL_PARTS: 4831 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4832 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4833 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4834 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4835 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4836 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4837 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4838 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4839 case ISD::MUL: return LowerMUL(Op, DAG); 4840 case ISD::SDIV: return LowerSDIV(Op, DAG); 4841 case ISD::UDIV: return LowerUDIV(Op, DAG); 4842 } 4843 return SDValue(); 4844} 4845 4846/// ReplaceNodeResults - Replace the results of node with an illegal result 4847/// type with new values built out of custom code. 
4848void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4849 SmallVectorImpl<SDValue>&Results, 4850 SelectionDAG &DAG) const { 4851 SDValue Res; 4852 switch (N->getOpcode()) { 4853 default: 4854 llvm_unreachable("Don't know how to custom expand this!"); 4855 break; 4856 case ISD::BITCAST: 4857 Res = ExpandBITCAST(N, DAG); 4858 break; 4859 case ISD::SRL: 4860 case ISD::SRA: 4861 Res = Expand64BitShift(N, DAG, Subtarget); 4862 break; 4863 } 4864 if (Res.getNode()) 4865 Results.push_back(Res); 4866} 4867 4868//===----------------------------------------------------------------------===// 4869// ARM Scheduler Hooks 4870//===----------------------------------------------------------------------===// 4871 4872MachineBasicBlock * 4873ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 4874 MachineBasicBlock *BB, 4875 unsigned Size) const { 4876 unsigned dest = MI->getOperand(0).getReg(); 4877 unsigned ptr = MI->getOperand(1).getReg(); 4878 unsigned oldval = MI->getOperand(2).getReg(); 4879 unsigned newval = MI->getOperand(3).getReg(); 4880 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4881 DebugLoc dl = MI->getDebugLoc(); 4882 bool isThumb2 = Subtarget->isThumb2(); 4883 4884 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 4885 unsigned scratch = 4886 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 4887 : ARM::GPRRegisterClass); 4888 4889 if (isThumb2) { 4890 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 4891 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 4892 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 4893 } 4894 4895 unsigned ldrOpc, strOpc; 4896 switch (Size) { 4897 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4898 case 1: 4899 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4900 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4901 break; 4902 case 2: 4903 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4904 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4905 break; 4906 case 4: 4907 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4908 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4909 break; 4910 } 4911 4912 MachineFunction *MF = BB->getParent(); 4913 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4914 MachineFunction::iterator It = BB; 4915 ++It; // insert the new blocks after the current block 4916 4917 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4918 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4919 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4920 MF->insert(It, loop1MBB); 4921 MF->insert(It, loop2MBB); 4922 MF->insert(It, exitMBB); 4923 4924 // Transfer the remainder of BB and its successor edges to exitMBB. 4925 exitMBB->splice(exitMBB->begin(), BB, 4926 llvm::next(MachineBasicBlock::iterator(MI)), 4927 BB->end()); 4928 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4929 4930 // thisMBB: 4931 // ... 4932 // fallthrough --> loop1MBB 4933 BB->addSuccessor(loop1MBB); 4934 4935 // loop1MBB: 4936 // ldrex dest, [ptr] 4937 // cmp dest, oldval 4938 // bne exitMBB 4939 BB = loop1MBB; 4940 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4941 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4942 .addReg(dest).addReg(oldval)); 4943 BuildMI(BB, dl, TII->get(isThumb2 ? 
ARM::t2Bcc : ARM::Bcc)) 4944 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4945 BB->addSuccessor(loop2MBB); 4946 BB->addSuccessor(exitMBB); 4947 4948 // loop2MBB: 4949 // strex scratch, newval, [ptr] 4950 // cmp scratch, #0 4951 // bne loop1MBB 4952 BB = loop2MBB; 4953 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 4954 .addReg(ptr)); 4955 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4956 .addReg(scratch).addImm(0)); 4957 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4958 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4959 BB->addSuccessor(loop1MBB); 4960 BB->addSuccessor(exitMBB); 4961 4962 // exitMBB: 4963 // ... 4964 BB = exitMBB; 4965 4966 MI->eraseFromParent(); // The instruction is gone now. 4967 4968 return BB; 4969} 4970 4971MachineBasicBlock * 4972ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4973 unsigned Size, unsigned BinOpcode) const { 4974 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4975 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4976 4977 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4978 MachineFunction *MF = BB->getParent(); 4979 MachineFunction::iterator It = BB; 4980 ++It; 4981 4982 unsigned dest = MI->getOperand(0).getReg(); 4983 unsigned ptr = MI->getOperand(1).getReg(); 4984 unsigned incr = MI->getOperand(2).getReg(); 4985 DebugLoc dl = MI->getDebugLoc(); 4986 bool isThumb2 = Subtarget->isThumb2(); 4987 4988 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 4989 if (isThumb2) { 4990 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 4991 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 4992 } 4993 4994 unsigned ldrOpc, strOpc; 4995 switch (Size) { 4996 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4997 case 1: 4998 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4999 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5000 break; 5001 case 2: 5002 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5003 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5004 break; 5005 case 4: 5006 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5007 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5008 break; 5009 } 5010 5011 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5012 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5013 MF->insert(It, loopMBB); 5014 MF->insert(It, exitMBB); 5015 5016 // Transfer the remainder of BB and its successor edges to exitMBB. 5017 exitMBB->splice(exitMBB->begin(), BB, 5018 llvm::next(MachineBasicBlock::iterator(MI)), 5019 BB->end()); 5020 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5021 5022 TargetRegisterClass *TRC = 5023 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5024 unsigned scratch = MRI.createVirtualRegister(TRC); 5025 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5026 5027 // thisMBB: 5028 // ... 5029 // fallthrough --> loopMBB 5030 BB->addSuccessor(loopMBB); 5031 5032 // loopMBB: 5033 // ldrex dest, ptr 5034 // <binop> scratch2, dest, incr 5035 // strex scratch, scratch2, ptr 5036 // cmp scratch, #0 5037 // bne- loopMBB 5038 // fallthrough --> exitMBB 5039 BB = loopMBB; 5040 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 5041 if (BinOpcode) { 5042 // operand order needs to go the other way for NAND 5043 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5044 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 
5045 addReg(incr).addReg(dest)).addReg(0); 5046 else 5047 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5048 addReg(dest).addReg(incr)).addReg(0); 5049 } 5050 5051 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 5052 .addReg(ptr)); 5053 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5054 .addReg(scratch).addImm(0)); 5055 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5056 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5057 5058 BB->addSuccessor(loopMBB); 5059 BB->addSuccessor(exitMBB); 5060 5061 // exitMBB: 5062 // ... 5063 BB = exitMBB; 5064 5065 MI->eraseFromParent(); // The instruction is gone now. 5066 5067 return BB; 5068} 5069 5070MachineBasicBlock * 5071ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5072 MachineBasicBlock *BB, 5073 unsigned Size, 5074 bool signExtend, 5075 ARMCC::CondCodes Cond) const { 5076 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5077 5078 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5079 MachineFunction *MF = BB->getParent(); 5080 MachineFunction::iterator It = BB; 5081 ++It; 5082 5083 unsigned dest = MI->getOperand(0).getReg(); 5084 unsigned ptr = MI->getOperand(1).getReg(); 5085 unsigned incr = MI->getOperand(2).getReg(); 5086 unsigned oldval = dest; 5087 DebugLoc dl = MI->getDebugLoc(); 5088 bool isThumb2 = Subtarget->isThumb2(); 5089 5090 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5091 if (isThumb2) { 5092 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5093 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5094 } 5095 5096 unsigned ldrOpc, strOpc, extendOpc; 5097 switch (Size) { 5098 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5099 case 1: 5100 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5101 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5102 extendOpc = isThumb2 ? ARM::t2SXTBr : ARM::SXTBr; 5103 break; 5104 case 2: 5105 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5106 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5107 extendOpc = isThumb2 ? ARM::t2SXTHr : ARM::SXTHr; 5108 break; 5109 case 4: 5110 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5111 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5112 extendOpc = 0; 5113 break; 5114 } 5115 5116 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5117 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5118 MF->insert(It, loopMBB); 5119 MF->insert(It, exitMBB); 5120 5121 // Transfer the remainder of BB and its successor edges to exitMBB. 5122 exitMBB->splice(exitMBB->begin(), BB, 5123 llvm::next(MachineBasicBlock::iterator(MI)), 5124 BB->end()); 5125 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5126 5127 TargetRegisterClass *TRC = 5128 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5129 unsigned scratch = MRI.createVirtualRegister(TRC); 5130 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5131 5132 // thisMBB: 5133 // ... 5134 // fallthrough --> loopMBB 5135 BB->addSuccessor(loopMBB); 5136 5137 // loopMBB: 5138 // ldrex dest, ptr 5139 // (sign extend dest, if required) 5140 // cmp dest, incr 5141 // cmov.cond scratch2, dest, incr 5142 // strex scratch, scratch2, ptr 5143 // cmp scratch, #0 5144 // bne- loopMBB 5145 // fallthrough --> exitMBB 5146 BB = loopMBB; 5147 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 5148 5149 // Sign extend the value, if necessary. 
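  // E.g. for ATOMIC_LOAD_MIN_I8, the LDREXB above leaves a zero-extended byte
  // in 'dest'; the SXTB emitted below is assumed to be what gives the signed
  // compare and conditional move a correctly signed 'oldval'.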
5150 if (signExtend && extendOpc) { 5151 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5152 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval).addReg(dest)); 5153 } 5154 5155 // Build compare and cmov instructions. 5156 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5157 .addReg(oldval).addReg(incr)); 5158 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5159 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5160 5161 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 5162 .addReg(ptr)); 5163 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5164 .addReg(scratch).addImm(0)); 5165 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5166 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5167 5168 BB->addSuccessor(loopMBB); 5169 BB->addSuccessor(exitMBB); 5170 5171 // exitMBB: 5172 // ... 5173 BB = exitMBB; 5174 5175 MI->eraseFromParent(); // The instruction is gone now. 5176 5177 return BB; 5178} 5179 5180static 5181MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 5182 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 5183 E = MBB->succ_end(); I != E; ++I) 5184 if (*I != Succ) 5185 return *I; 5186 llvm_unreachable("Expecting a BB with two successors!"); 5187} 5188 5189// FIXME: This opcode table should obviously be expressed in the target 5190// description. We probably just need a "machine opcode" value in the pseudo 5191// instruction. But the ideal solution maybe to simply remove the "S" version 5192// of the opcode altogether. 5193struct AddSubFlagsOpcodePair { 5194 unsigned PseudoOpc; 5195 unsigned MachineOpc; 5196}; 5197 5198static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = { 5199 {ARM::ADCSri, ARM::ADCri}, 5200 {ARM::ADCSrr, ARM::ADCrr}, 5201 {ARM::ADCSrs, ARM::ADCrs}, 5202 {ARM::SBCSri, ARM::SBCri}, 5203 {ARM::SBCSrr, ARM::SBCrr}, 5204 {ARM::SBCSrs, ARM::SBCrs}, 5205 {ARM::RSBSri, ARM::RSBri}, 5206 {ARM::RSBSrr, ARM::RSBrr}, 5207 {ARM::RSBSrs, ARM::RSBrs}, 5208 {ARM::RSCSri, ARM::RSCri}, 5209 {ARM::RSCSrs, ARM::RSCrs}, 5210 {ARM::t2ADCSri, ARM::t2ADCri}, 5211 {ARM::t2ADCSrr, ARM::t2ADCrr}, 5212 {ARM::t2ADCSrs, ARM::t2ADCrs}, 5213 {ARM::t2SBCSri, ARM::t2SBCri}, 5214 {ARM::t2SBCSrr, ARM::t2SBCrr}, 5215 {ARM::t2SBCSrs, ARM::t2SBCrs}, 5216 {ARM::t2RSBSri, ARM::t2RSBri}, 5217 {ARM::t2RSBSrs, ARM::t2RSBrs}, 5218}; 5219 5220// Convert and Add or Subtract with Carry and Flags to a generic opcode with 5221// CPSR<def> operand. e.g. ADCS (...) -> ADC (... CPSR<def>). 5222// 5223// FIXME: Somewhere we should assert that CPSR<def> is in the correct 5224// position to be recognized by the target descrition as the 'S' bit. 5225bool ARMTargetLowering::RemapAddSubWithFlags(MachineInstr *MI, 5226 MachineBasicBlock *BB) const { 5227 unsigned OldOpc = MI->getOpcode(); 5228 unsigned NewOpc = 0; 5229 5230 // This is only called for instructions that need remapping, so iterating over 5231 // the tiny opcode table is not costly. 
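  // E.g. an ARM::ADCSrr pseudo matches its table entry and is rebuilt below as
  // ARM::ADCrr: the original operands are copied over, the default predicate
  // is appended, and an explicit CPSR<def> operand is then added to serve as
  // the 'S' bit.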
5232 static const int NPairs = 5233 sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair); 5234 for (AddSubFlagsOpcodePair *Pair = &AddSubFlagsOpcodeMap[0], 5235 *End = &AddSubFlagsOpcodeMap[NPairs]; Pair != End; ++Pair) { 5236 if (OldOpc == Pair->PseudoOpc) { 5237 NewOpc = Pair->MachineOpc; 5238 break; 5239 } 5240 } 5241 if (!NewOpc) 5242 return false; 5243 5244 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5245 DebugLoc dl = MI->getDebugLoc(); 5246 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 5247 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 5248 MIB.addOperand(MI->getOperand(i)); 5249 AddDefaultPred(MIB); 5250 MIB.addReg(ARM::CPSR, RegState::Define); // S bit 5251 MI->eraseFromParent(); 5252 return true; 5253} 5254 5255MachineBasicBlock * 5256ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5257 MachineBasicBlock *BB) const { 5258 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5259 DebugLoc dl = MI->getDebugLoc(); 5260 bool isThumb2 = Subtarget->isThumb2(); 5261 switch (MI->getOpcode()) { 5262 default: { 5263 if (RemapAddSubWithFlags(MI, BB)) 5264 return BB; 5265 5266 MI->dump(); 5267 llvm_unreachable("Unexpected instr type to insert"); 5268 } 5269 case ARM::ATOMIC_LOAD_ADD_I8: 5270 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5271 case ARM::ATOMIC_LOAD_ADD_I16: 5272 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5273 case ARM::ATOMIC_LOAD_ADD_I32: 5274 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5275 5276 case ARM::ATOMIC_LOAD_AND_I8: 5277 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5278 case ARM::ATOMIC_LOAD_AND_I16: 5279 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5280 case ARM::ATOMIC_LOAD_AND_I32: 5281 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5282 5283 case ARM::ATOMIC_LOAD_OR_I8: 5284 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5285 case ARM::ATOMIC_LOAD_OR_I16: 5286 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5287 case ARM::ATOMIC_LOAD_OR_I32: 5288 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5289 5290 case ARM::ATOMIC_LOAD_XOR_I8: 5291 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5292 case ARM::ATOMIC_LOAD_XOR_I16: 5293 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5294 case ARM::ATOMIC_LOAD_XOR_I32: 5295 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5296 5297 case ARM::ATOMIC_LOAD_NAND_I8: 5298 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5299 case ARM::ATOMIC_LOAD_NAND_I16: 5300 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5301 case ARM::ATOMIC_LOAD_NAND_I32: 5302 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5303 5304 case ARM::ATOMIC_LOAD_SUB_I8: 5305 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5306 case ARM::ATOMIC_LOAD_SUB_I16: 5307 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5308 case ARM::ATOMIC_LOAD_SUB_I32: 5309 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 5310 5311 case ARM::ATOMIC_LOAD_MIN_I8: 5312 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 5313 case ARM::ATOMIC_LOAD_MIN_I16: 5314 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 5315 case ARM::ATOMIC_LOAD_MIN_I32: 5316 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 5317 5318 case ARM::ATOMIC_LOAD_MAX_I8: 5319 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 5320 case ARM::ATOMIC_LOAD_MAX_I16: 5321 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 5322 case ARM::ATOMIC_LOAD_MAX_I32: 5323 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 5324 5325 case ARM::ATOMIC_LOAD_UMIN_I8: 5326 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 5327 case ARM::ATOMIC_LOAD_UMIN_I16: 5328 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 5329 case ARM::ATOMIC_LOAD_UMIN_I32: 5330 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 5331 5332 case ARM::ATOMIC_LOAD_UMAX_I8: 5333 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 5334 case ARM::ATOMIC_LOAD_UMAX_I16: 5335 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 5336 case ARM::ATOMIC_LOAD_UMAX_I32: 5337 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 5338 5339 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 5340 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 5341 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 5342 5343 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 5344 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 5345 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 5346 5347 case ARM::tMOVCCr_pseudo: { 5348 // To "insert" a SELECT_CC instruction, we actually have to insert the 5349 // diamond control-flow pattern. The incoming instruction knows the 5350 // destination vreg to set, the condition code register to branch on, the 5351 // true/false values to select between, and a branch opcode to use. 5352 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5353 MachineFunction::iterator It = BB; 5354 ++It; 5355 5356 // thisMBB: 5357 // ... 5358 // TrueVal = ... 5359 // cmpTY ccX, r1, r2 5360 // bCC copy1MBB 5361 // fallthrough --> copy0MBB 5362 MachineBasicBlock *thisMBB = BB; 5363 MachineFunction *F = BB->getParent(); 5364 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 5365 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 5366 F->insert(It, copy0MBB); 5367 F->insert(It, sinkMBB); 5368 5369 // Transfer the remainder of BB and its successor edges to sinkMBB. 5370 sinkMBB->splice(sinkMBB->begin(), BB, 5371 llvm::next(MachineBasicBlock::iterator(MI)), 5372 BB->end()); 5373 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 5374 5375 BB->addSuccessor(copy0MBB); 5376 BB->addSuccessor(sinkMBB); 5377 5378 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 5379 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 5380 5381 // copy0MBB: 5382 // %FalseValue = ... 5383 // # fallthrough to sinkMBB 5384 BB = copy0MBB; 5385 5386 // Update machine-CFG edges 5387 BB->addSuccessor(sinkMBB); 5388 5389 // sinkMBB: 5390 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5391 // ... 
5392 BB = sinkMBB; 5393 BuildMI(*BB, BB->begin(), dl, 5394 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 5395 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5396 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5397 5398 MI->eraseFromParent(); // The pseudo instruction is gone now. 5399 return BB; 5400 } 5401 5402 case ARM::BCCi64: 5403 case ARM::BCCZi64: { 5404 // If there is an unconditional branch to the other successor, remove it. 5405 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 5406 5407 // Compare both parts that make up the double comparison separately for 5408 // equality. 5409 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 5410 5411 unsigned LHS1 = MI->getOperand(1).getReg(); 5412 unsigned LHS2 = MI->getOperand(2).getReg(); 5413 if (RHSisZero) { 5414 AddDefaultPred(BuildMI(BB, dl, 5415 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5416 .addReg(LHS1).addImm(0)); 5417 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5418 .addReg(LHS2).addImm(0) 5419 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5420 } else { 5421 unsigned RHS1 = MI->getOperand(3).getReg(); 5422 unsigned RHS2 = MI->getOperand(4).getReg(); 5423 AddDefaultPred(BuildMI(BB, dl, 5424 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5425 .addReg(LHS1).addReg(RHS1)); 5426 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5427 .addReg(LHS2).addReg(RHS2) 5428 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5429 } 5430 5431 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 5432 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 5433 if (MI->getOperand(0).getImm() == ARMCC::NE) 5434 std::swap(destMBB, exitMBB); 5435 5436 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5437 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 5438 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 5439 .addMBB(exitMBB); 5440 5441 MI->eraseFromParent(); // The pseudo instruction is gone now. 5442 return BB; 5443 } 5444 } 5445} 5446 5447//===----------------------------------------------------------------------===// 5448// ARM Optimization Hooks 5449//===----------------------------------------------------------------------===// 5450 5451static 5452SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 5453 TargetLowering::DAGCombinerInfo &DCI) { 5454 SelectionDAG &DAG = DCI.DAG; 5455 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5456 EVT VT = N->getValueType(0); 5457 unsigned Opc = N->getOpcode(); 5458 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 5459 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 5460 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 5461 ISD::CondCode CC = ISD::SETCC_INVALID; 5462 5463 if (isSlctCC) { 5464 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 5465 } else { 5466 SDValue CCOp = Slct.getOperand(0); 5467 if (CCOp.getOpcode() == ISD::SETCC) 5468 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 5469 } 5470 5471 bool DoXform = false; 5472 bool InvCC = false; 5473 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 5474 "Bad input!"); 5475 5476 if (LHS.getOpcode() == ISD::Constant && 5477 cast<ConstantSDNode>(LHS)->isNullValue()) { 5478 DoXform = true; 5479 } else if (CC != ISD::SETCC_INVALID && 5480 RHS.getOpcode() == ISD::Constant && 5481 cast<ConstantSDNode>(RHS)->isNullValue()) { 5482 std::swap(LHS, RHS); 5483 SDValue Op0 = Slct.getOperand(0); 5484 EVT OpVT = isSlctCC ? 
Op0.getValueType() : 5485 Op0.getOperand(0).getValueType(); 5486 bool isInt = OpVT.isInteger(); 5487 CC = ISD::getSetCCInverse(CC, isInt); 5488 5489 if (!TLI.isCondCodeLegal(CC, OpVT)) 5490 return SDValue(); // Inverse operator isn't legal. 5491 5492 DoXform = true; 5493 InvCC = true; 5494 } 5495 5496 if (DoXform) { 5497 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 5498 if (isSlctCC) 5499 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 5500 Slct.getOperand(0), Slct.getOperand(1), CC); 5501 SDValue CCOp = Slct.getOperand(0); 5502 if (InvCC) 5503 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 5504 CCOp.getOperand(0), CCOp.getOperand(1), CC); 5505 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 5506 CCOp, OtherOp, Result); 5507 } 5508 return SDValue(); 5509} 5510 5511/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 5512/// operands N0 and N1. This is a helper for PerformADDCombine that is 5513/// called with the default operands, and if that fails, with commuted 5514/// operands. 5515static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 5516 TargetLowering::DAGCombinerInfo &DCI) { 5517 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 5518 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 5519 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 5520 if (Result.getNode()) return Result; 5521 } 5522 return SDValue(); 5523} 5524 5525/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 5526/// 5527static SDValue PerformADDCombine(SDNode *N, 5528 TargetLowering::DAGCombinerInfo &DCI) { 5529 SDValue N0 = N->getOperand(0); 5530 SDValue N1 = N->getOperand(1); 5531 5532 // First try with the default operand order. 5533 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 5534 if (Result.getNode()) 5535 return Result; 5536 5537 // If that didn't work, try again with the operands commuted. 5538 return PerformADDCombineWithOperands(N, N1, N0, DCI); 5539} 5540 5541/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 5542/// 5543static SDValue PerformSUBCombine(SDNode *N, 5544 TargetLowering::DAGCombinerInfo &DCI) { 5545 SDValue N0 = N->getOperand(0); 5546 SDValue N1 = N->getOperand(1); 5547 5548 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 5549 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 5550 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 5551 if (Result.getNode()) return Result; 5552 } 5553 5554 return SDValue(); 5555} 5556 5557/// PerformVMULCombine 5558/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 5559/// special multiplier accumulator forwarding. 
5560/// vmul d3, d0, d2 5561/// vmla d3, d1, d2 5562/// is faster than 5563/// vadd d3, d0, d1 5564/// vmul d3, d3, d2 5565static SDValue PerformVMULCombine(SDNode *N, 5566 TargetLowering::DAGCombinerInfo &DCI, 5567 const ARMSubtarget *Subtarget) { 5568 if (!Subtarget->hasVMLxForwarding()) 5569 return SDValue(); 5570 5571 SelectionDAG &DAG = DCI.DAG; 5572 SDValue N0 = N->getOperand(0); 5573 SDValue N1 = N->getOperand(1); 5574 unsigned Opcode = N0.getOpcode(); 5575 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 5576 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 5577 Opcode = N0.getOpcode(); 5578 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 5579 Opcode != ISD::FADD && Opcode != ISD::FSUB) 5580 return SDValue(); 5581 std::swap(N0, N1); 5582 } 5583 5584 EVT VT = N->getValueType(0); 5585 DebugLoc DL = N->getDebugLoc(); 5586 SDValue N00 = N0->getOperand(0); 5587 SDValue N01 = N0->getOperand(1); 5588 return DAG.getNode(Opcode, DL, VT, 5589 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 5590 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 5591} 5592 5593static SDValue PerformMULCombine(SDNode *N, 5594 TargetLowering::DAGCombinerInfo &DCI, 5595 const ARMSubtarget *Subtarget) { 5596 SelectionDAG &DAG = DCI.DAG; 5597 5598 if (Subtarget->isThumb1Only()) 5599 return SDValue(); 5600 5601 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 5602 return SDValue(); 5603 5604 EVT VT = N->getValueType(0); 5605 if (VT.is64BitVector() || VT.is128BitVector()) 5606 return PerformVMULCombine(N, DCI, Subtarget); 5607 if (VT != MVT::i32) 5608 return SDValue(); 5609 5610 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5611 if (!C) 5612 return SDValue(); 5613 5614 uint64_t MulAmt = C->getZExtValue(); 5615 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 5616 ShiftAmt = ShiftAmt & (32 - 1); 5617 SDValue V = N->getOperand(0); 5618 DebugLoc DL = N->getDebugLoc(); 5619 5620 SDValue Res; 5621 MulAmt >>= ShiftAmt; 5622 if (isPowerOf2_32(MulAmt - 1)) { 5623 // (mul x, 2^N + 1) => (add (shl x, N), x) 5624 Res = DAG.getNode(ISD::ADD, DL, VT, 5625 V, DAG.getNode(ISD::SHL, DL, VT, 5626 V, DAG.getConstant(Log2_32(MulAmt-1), 5627 MVT::i32))); 5628 } else if (isPowerOf2_32(MulAmt + 1)) { 5629 // (mul x, 2^N - 1) => (sub (shl x, N), x) 5630 Res = DAG.getNode(ISD::SUB, DL, VT, 5631 DAG.getNode(ISD::SHL, DL, VT, 5632 V, DAG.getConstant(Log2_32(MulAmt+1), 5633 MVT::i32)), 5634 V); 5635 } else 5636 return SDValue(); 5637 5638 if (ShiftAmt != 0) 5639 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 5640 DAG.getConstant(ShiftAmt, MVT::i32)); 5641 5642 // Do not add new nodes to DAG combiner worklist. 
5643 DCI.CombineTo(N, Res, false); 5644 return SDValue(); 5645} 5646 5647static SDValue PerformANDCombine(SDNode *N, 5648 TargetLowering::DAGCombinerInfo &DCI) { 5649 5650 // Attempt to use immediate-form VBIC 5651 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5652 DebugLoc dl = N->getDebugLoc(); 5653 EVT VT = N->getValueType(0); 5654 SelectionDAG &DAG = DCI.DAG; 5655 5656 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 5657 return SDValue(); 5658 5659 APInt SplatBits, SplatUndef; 5660 unsigned SplatBitSize; 5661 bool HasAnyUndefs; 5662 if (BVN && 5663 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5664 if (SplatBitSize <= 64) { 5665 EVT VbicVT; 5666 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 5667 SplatUndef.getZExtValue(), SplatBitSize, 5668 DAG, VbicVT, VT.is128BitVector(), 5669 OtherModImm); 5670 if (Val.getNode()) { 5671 SDValue Input = 5672 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 5673 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 5674 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 5675 } 5676 } 5677 } 5678 5679 return SDValue(); 5680} 5681 5682/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 5683static SDValue PerformORCombine(SDNode *N, 5684 TargetLowering::DAGCombinerInfo &DCI, 5685 const ARMSubtarget *Subtarget) { 5686 // Attempt to use immediate-form VORR 5687 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5688 DebugLoc dl = N->getDebugLoc(); 5689 EVT VT = N->getValueType(0); 5690 SelectionDAG &DAG = DCI.DAG; 5691 5692 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 5693 return SDValue(); 5694 5695 APInt SplatBits, SplatUndef; 5696 unsigned SplatBitSize; 5697 bool HasAnyUndefs; 5698 if (BVN && Subtarget->hasNEON() && 5699 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5700 if (SplatBitSize <= 64) { 5701 EVT VorrVT; 5702 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 5703 SplatUndef.getZExtValue(), SplatBitSize, 5704 DAG, VorrVT, VT.is128BitVector(), 5705 OtherModImm); 5706 if (Val.getNode()) { 5707 SDValue Input = 5708 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 5709 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 5710 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 5711 } 5712 } 5713 } 5714 5715 SDValue N0 = N->getOperand(0); 5716 if (N0.getOpcode() != ISD::AND) 5717 return SDValue(); 5718 SDValue N1 = N->getOperand(1); 5719 5720 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 5721 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 5722 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 5723 APInt SplatUndef; 5724 unsigned SplatBitSize; 5725 bool HasAnyUndefs; 5726 5727 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 5728 APInt SplatBits0; 5729 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 5730 HasAnyUndefs) && !HasAnyUndefs) { 5731 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 5732 APInt SplatBits1; 5733 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 5734 HasAnyUndefs) && !HasAnyUndefs && 5735 SplatBits0 == ~SplatBits1) { 5736 // Canonicalize the vector type to make instruction selection simpler. 5737 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 5738 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 5739 N0->getOperand(1), N0->getOperand(0), 5740 N1->getOperand(0)); 5741 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 5742 } 5743 } 5744 } 5745 5746 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 5747 // reasonable. 5748 5749 // BFI is only available on V6T2+ 5750 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 5751 return SDValue(); 5752 5753 DebugLoc DL = N->getDebugLoc(); 5754 // 1) or (and A, mask), val => ARMbfi A, val, mask 5755 // iff (val & mask) == val 5756 // 5757 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5758 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 5759 // && mask == ~mask2 5760 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 5761 // && ~mask == mask2 5762 // (i.e., copy a bitfield value into another bitfield of the same width) 5763 5764 if (VT != MVT::i32) 5765 return SDValue(); 5766 5767 SDValue N00 = N0.getOperand(0); 5768 5769 // The value and the mask need to be constants so we can verify this is 5770 // actually a bitfield set. If the mask is 0xffff, we can do better 5771 // via a movt instruction, so don't use BFI in that case. 5772 SDValue MaskOp = N0.getOperand(1); 5773 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 5774 if (!MaskC) 5775 return SDValue(); 5776 unsigned Mask = MaskC->getZExtValue(); 5777 if (Mask == 0xffff) 5778 return SDValue(); 5779 SDValue Res; 5780 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 5781 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5782 if (N1C) { 5783 unsigned Val = N1C->getZExtValue(); 5784 if ((Val & ~Mask) != Val) 5785 return SDValue(); 5786 5787 if (ARM::isBitFieldInvertedMask(Mask)) { 5788 Val >>= CountTrailingZeros_32(~Mask); 5789 5790 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 5791 DAG.getConstant(Val, MVT::i32), 5792 DAG.getConstant(Mask, MVT::i32)); 5793 5794 // Do not add new nodes to DAG combiner worklist. 5795 DCI.CombineTo(N, Res, false); 5796 return SDValue(); 5797 } 5798 } else if (N1.getOpcode() == ISD::AND) { 5799 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5800 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5801 if (!N11C) 5802 return SDValue(); 5803 unsigned Mask2 = N11C->getZExtValue(); 5804 5805 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 5806 // as is to match. 5807 if (ARM::isBitFieldInvertedMask(Mask) && 5808 (Mask == ~Mask2)) { 5809 // The pack halfword instruction works better for masks that fit it, 5810 // so use that when it's available. 5811 if (Subtarget->hasT2ExtractPack() && 5812 (Mask == 0xffff || Mask == 0xffff0000)) 5813 return SDValue(); 5814 // 2a 5815 unsigned amt = CountTrailingZeros_32(Mask2); 5816 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 5817 DAG.getConstant(amt, MVT::i32)); 5818 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 5819 DAG.getConstant(Mask, MVT::i32)); 5820 // Do not add new nodes to DAG combiner worklist. 5821 DCI.CombineTo(N, Res, false); 5822 return SDValue(); 5823 } else if (ARM::isBitFieldInvertedMask(~Mask) && 5824 (~Mask == Mask2)) { 5825 // The pack halfword instruction works better for masks that fit it, 5826 // so use that when it's available. 
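      // E.g. (illustrative masks) Mask == 0xffff0000 with Mask2 == 0xffff is
      // just a halfword copy; the pack halfword (PKH) patterns are assumed to
      // handle that case better than the BFI formed below, hence the early
      // return.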
5827 if (Subtarget->hasT2ExtractPack() && 5828 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 5829 return SDValue(); 5830 // 2b 5831 unsigned lsb = CountTrailingZeros_32(Mask); 5832 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 5833 DAG.getConstant(lsb, MVT::i32)); 5834 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 5835 DAG.getConstant(Mask2, MVT::i32)); 5836 // Do not add new nodes to DAG combiner worklist. 5837 DCI.CombineTo(N, Res, false); 5838 return SDValue(); 5839 } 5840 } 5841 5842 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 5843 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 5844 ARM::isBitFieldInvertedMask(~Mask)) { 5845 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 5846 // where lsb(mask) == #shamt and masked bits of B are known zero. 5847 SDValue ShAmt = N00.getOperand(1); 5848 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 5849 unsigned LSB = CountTrailingZeros_32(Mask); 5850 if (ShAmtC != LSB) 5851 return SDValue(); 5852 5853 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 5854 DAG.getConstant(~Mask, MVT::i32)); 5855 5856 // Do not add new nodes to DAG combiner worklist. 5857 DCI.CombineTo(N, Res, false); 5858 } 5859 5860 return SDValue(); 5861} 5862 5863/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 5864/// C1 & C2 == C1. 5865static SDValue PerformBFICombine(SDNode *N, 5866 TargetLowering::DAGCombinerInfo &DCI) { 5867 SDValue N1 = N->getOperand(1); 5868 if (N1.getOpcode() == ISD::AND) { 5869 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5870 if (!N11C) 5871 return SDValue(); 5872 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 5873 unsigned Mask2 = N11C->getZExtValue(); 5874 if ((Mask & Mask2) == Mask2) 5875 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 5876 N->getOperand(0), N1.getOperand(0), 5877 N->getOperand(2)); 5878 } 5879 return SDValue(); 5880} 5881 5882/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 5883/// ARMISD::VMOVRRD. 5884static SDValue PerformVMOVRRDCombine(SDNode *N, 5885 TargetLowering::DAGCombinerInfo &DCI) { 5886 // vmovrrd(vmovdrr x, y) -> x,y 5887 SDValue InDouble = N->getOperand(0); 5888 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 5889 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 5890 5891 // vmovrrd(load f64) -> (load i32), (load i32) 5892 SDNode *InNode = InDouble.getNode(); 5893 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 5894 InNode->getValueType(0) == MVT::f64 && 5895 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 5896 !cast<LoadSDNode>(InNode)->isVolatile()) { 5897 // TODO: Should this be done for non-FrameIndex operands? 
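    // Sketch of the rewrite performed below (FrameIndex base, as checked
    // above):
    //   vmovrrd (f64 load [fi])  ->  (i32 load [fi]), (i32 load [fi + 4])
    // with the second load's alignment capped at min(4, original alignment/2).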
5898 LoadSDNode *LD = cast<LoadSDNode>(InNode); 5899 5900 SelectionDAG &DAG = DCI.DAG; 5901 DebugLoc DL = LD->getDebugLoc(); 5902 SDValue BasePtr = LD->getBasePtr(); 5903 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 5904 LD->getPointerInfo(), LD->isVolatile(), 5905 LD->isNonTemporal(), LD->getAlignment()); 5906 5907 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 5908 DAG.getConstant(4, MVT::i32)); 5909 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 5910 LD->getPointerInfo(), LD->isVolatile(), 5911 LD->isNonTemporal(), 5912 std::min(4U, LD->getAlignment() / 2)); 5913 5914 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 5915 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 5916 DCI.RemoveFromWorklist(LD); 5917 DAG.DeleteNode(LD); 5918 return Result; 5919 } 5920 5921 return SDValue(); 5922} 5923 5924/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 5925/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 5926static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 5927 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 5928 SDValue Op0 = N->getOperand(0); 5929 SDValue Op1 = N->getOperand(1); 5930 if (Op0.getOpcode() == ISD::BITCAST) 5931 Op0 = Op0.getOperand(0); 5932 if (Op1.getOpcode() == ISD::BITCAST) 5933 Op1 = Op1.getOperand(0); 5934 if (Op0.getOpcode() == ARMISD::VMOVRRD && 5935 Op0.getNode() == Op1.getNode() && 5936 Op0.getResNo() == 0 && Op1.getResNo() == 1) 5937 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 5938 N->getValueType(0), Op0.getOperand(0)); 5939 return SDValue(); 5940} 5941 5942/// PerformSTORECombine - Target-specific dag combine xforms for 5943/// ISD::STORE. 5944static SDValue PerformSTORECombine(SDNode *N, 5945 TargetLowering::DAGCombinerInfo &DCI) { 5946 // Bitcast an i64 store extracted from a vector to f64. 5947 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
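  // E.g. (illustrative types) for
  //   (store (i64 extract_vector_elt (v2i64 V), 0))
  // the code below bitcasts V to v2f64, extracts an f64 element, and stores it
  // through an i64 bitcast, so the stored value does not get legalized into a
  // pair of i32 stores.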
5948 StoreSDNode *St = cast<StoreSDNode>(N); 5949 SDValue StVal = St->getValue(); 5950 if (!ISD::isNormalStore(St) || St->isVolatile()) 5951 return SDValue(); 5952 5953 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 5954 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 5955 SelectionDAG &DAG = DCI.DAG; 5956 DebugLoc DL = St->getDebugLoc(); 5957 SDValue BasePtr = St->getBasePtr(); 5958 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 5959 StVal.getNode()->getOperand(0), BasePtr, 5960 St->getPointerInfo(), St->isVolatile(), 5961 St->isNonTemporal(), St->getAlignment()); 5962 5963 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 5964 DAG.getConstant(4, MVT::i32)); 5965 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 5966 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 5967 St->isNonTemporal(), 5968 std::min(4U, St->getAlignment() / 2)); 5969 } 5970 5971 if (StVal.getValueType() != MVT::i64 || 5972 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 5973 return SDValue(); 5974 5975 SelectionDAG &DAG = DCI.DAG; 5976 DebugLoc dl = StVal.getDebugLoc(); 5977 SDValue IntVec = StVal.getOperand(0); 5978 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5979 IntVec.getValueType().getVectorNumElements()); 5980 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 5981 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 5982 Vec, StVal.getOperand(1)); 5983 dl = N->getDebugLoc(); 5984 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 5985 // Make the DAGCombiner fold the bitcasts. 5986 DCI.AddToWorklist(Vec.getNode()); 5987 DCI.AddToWorklist(ExtElt.getNode()); 5988 DCI.AddToWorklist(V.getNode()); 5989 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 5990 St->getPointerInfo(), St->isVolatile(), 5991 St->isNonTemporal(), St->getAlignment(), 5992 St->getTBAAInfo()); 5993} 5994 5995/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 5996/// are normal, non-volatile loads. If so, it is profitable to bitcast an 5997/// i64 vector to have f64 elements, since the value can then be loaded 5998/// directly into a VFP register. 5999static bool hasNormalLoadOperand(SDNode *N) { 6000 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 6001 for (unsigned i = 0; i < NumElts; ++i) { 6002 SDNode *Elt = N->getOperand(i).getNode(); 6003 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 6004 return true; 6005 } 6006 return false; 6007} 6008 6009/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 6010/// ISD::BUILD_VECTOR. 6011static SDValue PerformBUILD_VECTORCombine(SDNode *N, 6012 TargetLowering::DAGCombinerInfo &DCI){ 6013 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 6014 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 6015 // into a pair of GPRs, which is fine when the value is used as a scalar, 6016 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 6017 SelectionDAG &DAG = DCI.DAG; 6018 if (N->getNumOperands() == 2) { 6019 SDValue RV = PerformVMOVDRRCombine(N, DAG); 6020 if (RV.getNode()) 6021 return RV; 6022 } 6023 6024 // Load i64 elements as f64 values so that type legalization does not split 6025 // them up into i32 values. 
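  // As a rough sketch for a two-element vector whose operands are normal,
  // non-volatile i64 loads:
  //   v2i64 build_vector (i64 load A), (i64 load B)
  //     =>  bitcast v2i64 (v2f64 build_vector (f64 load A), (f64 load B))
  // The f64 loads can go directly into D registers, so the i64 elements are
  // never split into i32 pairs by type legalization.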
6026 EVT VT = N->getValueType(0); 6027 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 6028 return SDValue(); 6029 DebugLoc dl = N->getDebugLoc(); 6030 SmallVector<SDValue, 8> Ops; 6031 unsigned NumElts = VT.getVectorNumElements(); 6032 for (unsigned i = 0; i < NumElts; ++i) { 6033 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 6034 Ops.push_back(V); 6035 // Make the DAGCombiner fold the bitcast. 6036 DCI.AddToWorklist(V.getNode()); 6037 } 6038 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 6039 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 6040 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 6041} 6042 6043/// PerformInsertEltCombine - Target-specific dag combine xforms for 6044/// ISD::INSERT_VECTOR_ELT. 6045static SDValue PerformInsertEltCombine(SDNode *N, 6046 TargetLowering::DAGCombinerInfo &DCI) { 6047 // Bitcast an i64 load inserted into a vector to f64. 6048 // Otherwise, the i64 value will be legalized to a pair of i32 values. 6049 EVT VT = N->getValueType(0); 6050 SDNode *Elt = N->getOperand(1).getNode(); 6051 if (VT.getVectorElementType() != MVT::i64 || 6052 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 6053 return SDValue(); 6054 6055 SelectionDAG &DAG = DCI.DAG; 6056 DebugLoc dl = N->getDebugLoc(); 6057 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6058 VT.getVectorNumElements()); 6059 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 6060 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 6061 // Make the DAGCombiner fold the bitcasts. 6062 DCI.AddToWorklist(Vec.getNode()); 6063 DCI.AddToWorklist(V.getNode()); 6064 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 6065 Vec, V, N->getOperand(2)); 6066 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 6067} 6068 6069/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 6070/// ISD::VECTOR_SHUFFLE. 6071static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 6072 // The LLVM shufflevector instruction does not require the shuffle mask 6073 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 6074 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 6075 // operands do not match the mask length, they are extended by concatenating 6076 // them with undef vectors. That is probably the right thing for other 6077 // targets, but for NEON it is better to concatenate two double-register 6078 // size vector operands into a single quad-register size vector. Do that 6079 // transformation here: 6080 // shuffle(concat(v1, undef), concat(v2, undef)) -> 6081 // shuffle(concat(v1, v2), undef) 6082 SDValue Op0 = N->getOperand(0); 6083 SDValue Op1 = N->getOperand(1); 6084 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 6085 Op1.getOpcode() != ISD::CONCAT_VECTORS || 6086 Op0.getNumOperands() != 2 || 6087 Op1.getNumOperands() != 2) 6088 return SDValue(); 6089 SDValue Concat0Op1 = Op0.getOperand(1); 6090 SDValue Concat1Op1 = Op1.getOperand(1); 6091 if (Concat0Op1.getOpcode() != ISD::UNDEF || 6092 Concat1Op1.getOpcode() != ISD::UNDEF) 6093 return SDValue(); 6094 // Skip the transformation if any of the types are illegal. 
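  // When the types are legal, the rewrite below turns, for example, a
  // four-element shuffle
  //   shuffle(concat(v1, undef), concat(v2, undef), <0, 4, 1, 5>)
  // into
  //   shuffle(concat(v1, v2), undef, <0, 2, 1, 3>)
  // (mask values shown are only an illustrative sketch).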
6095 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6096 EVT VT = N->getValueType(0); 6097 if (!TLI.isTypeLegal(VT) || 6098 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 6099 !TLI.isTypeLegal(Concat1Op1.getValueType())) 6100 return SDValue(); 6101 6102 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 6103 Op0.getOperand(0), Op1.getOperand(0)); 6104 // Translate the shuffle mask. 6105 SmallVector<int, 16> NewMask; 6106 unsigned NumElts = VT.getVectorNumElements(); 6107 unsigned HalfElts = NumElts/2; 6108 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 6109 for (unsigned n = 0; n < NumElts; ++n) { 6110 int MaskElt = SVN->getMaskElt(n); 6111 int NewElt = -1; 6112 if (MaskElt < (int)HalfElts) 6113 NewElt = MaskElt; 6114 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 6115 NewElt = HalfElts + MaskElt - NumElts; 6116 NewMask.push_back(NewElt); 6117 } 6118 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 6119 DAG.getUNDEF(VT), NewMask.data()); 6120} 6121 6122/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 6123/// NEON load/store intrinsics to merge base address updates. 6124static SDValue CombineBaseUpdate(SDNode *N, 6125 TargetLowering::DAGCombinerInfo &DCI) { 6126 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6127 return SDValue(); 6128 6129 SelectionDAG &DAG = DCI.DAG; 6130 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 6131 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 6132 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 6133 SDValue Addr = N->getOperand(AddrOpIdx); 6134 6135 // Search for a use of the address operand that is an increment. 6136 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 6137 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 6138 SDNode *User = *UI; 6139 if (User->getOpcode() != ISD::ADD || 6140 UI.getUse().getResNo() != Addr.getResNo()) 6141 continue; 6142 6143 // Check that the add is independent of the load/store. Otherwise, folding 6144 // it would create a cycle. 6145 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 6146 continue; 6147 6148 // Find the new opcode for the updating load/store. 
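    // The net effect of the combine is a post-incremented access; as a rough
    // sketch at the assembly level,
    //   vld1.32 {d16}, [r0]
    //   add     r0, r0, #8
    // becomes
    //   vld1.32 {d16}, [r0]!
    // and a non-constant increment becomes the register-writeback form.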
6149 bool isLoad = true; 6150 bool isLaneOp = false; 6151 unsigned NewOpc = 0; 6152 unsigned NumVecs = 0; 6153 if (isIntrinsic) { 6154 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 6155 switch (IntNo) { 6156 default: assert(0 && "unexpected intrinsic for Neon base update"); 6157 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 6158 NumVecs = 1; break; 6159 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 6160 NumVecs = 2; break; 6161 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 6162 NumVecs = 3; break; 6163 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 6164 NumVecs = 4; break; 6165 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 6166 NumVecs = 2; isLaneOp = true; break; 6167 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 6168 NumVecs = 3; isLaneOp = true; break; 6169 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 6170 NumVecs = 4; isLaneOp = true; break; 6171 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 6172 NumVecs = 1; isLoad = false; break; 6173 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 6174 NumVecs = 2; isLoad = false; break; 6175 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 6176 NumVecs = 3; isLoad = false; break; 6177 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 6178 NumVecs = 4; isLoad = false; break; 6179 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 6180 NumVecs = 2; isLoad = false; isLaneOp = true; break; 6181 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 6182 NumVecs = 3; isLoad = false; isLaneOp = true; break; 6183 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 6184 NumVecs = 4; isLoad = false; isLaneOp = true; break; 6185 } 6186 } else { 6187 isLaneOp = true; 6188 switch (N->getOpcode()) { 6189 default: assert(0 && "unexpected opcode for Neon base update"); 6190 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 6191 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 6192 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 6193 } 6194 } 6195 6196 // Find the size of memory referenced by the load/store. 6197 EVT VecTy; 6198 if (isLoad) 6199 VecTy = N->getValueType(0); 6200 else 6201 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 6202 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 6203 if (isLaneOp) 6204 NumBytes /= VecTy.getVectorNumElements(); 6205 6206 // If the increment is a constant, it must match the memory ref size. 6207 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 6208 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 6209 uint64_t IncVal = CInc->getZExtValue(); 6210 if (IncVal != NumBytes) 6211 continue; 6212 } else if (NumBytes >= 3 * 16) { 6213 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 6214 // separate instructions that make it harder to use a non-constant update. 6215 continue; 6216 } 6217 6218 // Create the new updating load/store node. 6219 EVT Tys[6]; 6220 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 6221 unsigned n; 6222 for (n = 0; n < NumResultVecs; ++n) 6223 Tys[n] = VecTy; 6224 Tys[n++] = MVT::i32; 6225 Tys[n] = MVT::Other; 6226 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 6227 SmallVector<SDValue, 8> Ops; 6228 Ops.push_back(N->getOperand(0)); // incoming chain 6229 Ops.push_back(N->getOperand(AddrOpIdx)); 6230 Ops.push_back(Inc); 6231 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 6232 Ops.push_back(N->getOperand(i)); 6233 } 6234 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 6235 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 6236 Ops.data(), Ops.size(), 6237 MemInt->getMemoryVT(), 6238 MemInt->getMemOperand()); 6239 6240 // Update the uses. 6241 std::vector<SDValue> NewResults; 6242 for (unsigned i = 0; i < NumResultVecs; ++i) { 6243 NewResults.push_back(SDValue(UpdN.getNode(), i)); 6244 } 6245 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 6246 DCI.CombineTo(N, NewResults); 6247 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 6248 6249 break; 6250 } 6251 return SDValue(); 6252} 6253 6254/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 6255/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 6256/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 6257/// return true. 6258static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 6259 SelectionDAG &DAG = DCI.DAG; 6260 EVT VT = N->getValueType(0); 6261 // vldN-dup instructions only support 64-bit vectors for N > 1. 6262 if (!VT.is64BitVector()) 6263 return false; 6264 6265 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 6266 SDNode *VLD = N->getOperand(0).getNode(); 6267 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 6268 return false; 6269 unsigned NumVecs = 0; 6270 unsigned NewOpc = 0; 6271 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 6272 if (IntNo == Intrinsic::arm_neon_vld2lane) { 6273 NumVecs = 2; 6274 NewOpc = ARMISD::VLD2DUP; 6275 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 6276 NumVecs = 3; 6277 NewOpc = ARMISD::VLD3DUP; 6278 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 6279 NumVecs = 4; 6280 NewOpc = ARMISD::VLD4DUP; 6281 } else { 6282 return false; 6283 } 6284 6285 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 6286 // numbers match the load. 6287 unsigned VLDLaneNo = 6288 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 6289 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6290 UI != UE; ++UI) { 6291 // Ignore uses of the chain result. 6292 if (UI.getUse().getResNo() == NumVecs) 6293 continue; 6294 SDNode *User = *UI; 6295 if (User->getOpcode() != ARMISD::VDUPLANE || 6296 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 6297 return false; 6298 } 6299 6300 // Create the vldN-dup node. 6301 EVT Tys[5]; 6302 unsigned n; 6303 for (n = 0; n < NumVecs; ++n) 6304 Tys[n] = VT; 6305 Tys[n] = MVT::Other; 6306 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 6307 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 6308 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 6309 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 6310 Ops, 2, VLDMemInt->getMemoryVT(), 6311 VLDMemInt->getMemOperand()); 6312 6313 // Update the uses. 
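  // The loop below rewires each VDUPLANE user to read the corresponding
  // result of the new vldN-dup node directly. As a rough sketch at the
  // assembly level,
  //   vld2.8  {d16[2], d17[2]}, [r0]
  //   vdup.8  d18, d16[2]
  //   vdup.8  d19, d17[2]
  // collapses into a single
  //   vld2.8  {d18[], d19[]}, [r0]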
6314 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6315 UI != UE; ++UI) { 6316 unsigned ResNo = UI.getUse().getResNo(); 6317 // Ignore uses of the chain result. 6318 if (ResNo == NumVecs) 6319 continue; 6320 SDNode *User = *UI; 6321 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 6322 } 6323 6324 // Now the vldN-lane intrinsic is dead except for its chain result. 6325 // Update uses of the chain. 6326 std::vector<SDValue> VLDDupResults; 6327 for (unsigned n = 0; n < NumVecs; ++n) 6328 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 6329 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 6330 DCI.CombineTo(VLD, VLDDupResults); 6331 6332 return true; 6333} 6334 6335/// PerformVDUPLANECombine - Target-specific dag combine xforms for 6336/// ARMISD::VDUPLANE. 6337static SDValue PerformVDUPLANECombine(SDNode *N, 6338 TargetLowering::DAGCombinerInfo &DCI) { 6339 SDValue Op = N->getOperand(0); 6340 6341 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 6342 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 6343 if (CombineVLDDUP(N, DCI)) 6344 return SDValue(N, 0); 6345 6346 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 6347 // redundant. Ignore bit_converts for now; element sizes are checked below. 6348 while (Op.getOpcode() == ISD::BITCAST) 6349 Op = Op.getOperand(0); 6350 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 6351 return SDValue(); 6352 6353 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 6354 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 6355 // The canonical VMOV for a zero vector uses a 32-bit element size. 6356 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6357 unsigned EltBits; 6358 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 6359 EltSize = 8; 6360 EVT VT = N->getValueType(0); 6361 if (EltSize > VT.getVectorElementType().getSizeInBits()) 6362 return SDValue(); 6363 6364 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 6365} 6366 6367/// getVShiftImm - Check if this is a valid build_vector for the immediate 6368/// operand of a vector shift operation, where all the elements of the 6369/// build_vector must have the same constant integer value. 6370static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 6371 // Ignore bit_converts. 6372 while (Op.getOpcode() == ISD::BITCAST) 6373 Op = Op.getOperand(0); 6374 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6375 APInt SplatBits, SplatUndef; 6376 unsigned SplatBitSize; 6377 bool HasAnyUndefs; 6378 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 6379 HasAnyUndefs, ElementBits) || 6380 SplatBitSize > ElementBits) 6381 return false; 6382 Cnt = SplatBits.getSExtValue(); 6383 return true; 6384} 6385 6386/// isVShiftLImm - Check if this is a valid build_vector for the immediate 6387/// operand of a vector shift left operation. That value must be in the range: 6388/// 0 <= Value < ElementBits for a left shift; or 6389/// 0 <= Value <= ElementBits for a long left shift. 6390static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 6391 assert(VT.isVector() && "vector shift count is not a vector type"); 6392 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6393 if (! getVShiftImm(Op, ElementBits, Cnt)) 6394 return false; 6395 return (Cnt >= 0 && (isLong ? 
Cnt-1 : Cnt) < ElementBits); 6396} 6397 6398/// isVShiftRImm - Check if this is a valid build_vector for the immediate 6399/// operand of a vector shift right operation. For a shift opcode, the value 6400/// is positive, but for an intrinsic the value count must be negative. The 6401/// absolute value must be in the range: 6402/// 1 <= |Value| <= ElementBits for a right shift; or 6403/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 6404static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 6405 int64_t &Cnt) { 6406 assert(VT.isVector() && "vector shift count is not a vector type"); 6407 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6408 if (! getVShiftImm(Op, ElementBits, Cnt)) 6409 return false; 6410 if (isIntrinsic) 6411 Cnt = -Cnt; 6412 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 6413} 6414 6415/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 6416static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 6417 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 6418 switch (IntNo) { 6419 default: 6420 // Don't do anything for most intrinsics. 6421 break; 6422 6423 // Vector shifts: check for immediate versions and lower them. 6424 // Note: This is done during DAG combining instead of DAG legalizing because 6425 // the build_vectors for 64-bit vector element shift counts are generally 6426 // not legal, and it is hard to see their values after they get legalized to 6427 // loads from a constant pool. 6428 case Intrinsic::arm_neon_vshifts: 6429 case Intrinsic::arm_neon_vshiftu: 6430 case Intrinsic::arm_neon_vshiftls: 6431 case Intrinsic::arm_neon_vshiftlu: 6432 case Intrinsic::arm_neon_vshiftn: 6433 case Intrinsic::arm_neon_vrshifts: 6434 case Intrinsic::arm_neon_vrshiftu: 6435 case Intrinsic::arm_neon_vrshiftn: 6436 case Intrinsic::arm_neon_vqshifts: 6437 case Intrinsic::arm_neon_vqshiftu: 6438 case Intrinsic::arm_neon_vqshiftsu: 6439 case Intrinsic::arm_neon_vqshiftns: 6440 case Intrinsic::arm_neon_vqshiftnu: 6441 case Intrinsic::arm_neon_vqshiftnsu: 6442 case Intrinsic::arm_neon_vqrshiftns: 6443 case Intrinsic::arm_neon_vqrshiftnu: 6444 case Intrinsic::arm_neon_vqrshiftnsu: { 6445 EVT VT = N->getOperand(1).getValueType(); 6446 int64_t Cnt; 6447 unsigned VShiftOpc = 0; 6448 6449 switch (IntNo) { 6450 case Intrinsic::arm_neon_vshifts: 6451 case Intrinsic::arm_neon_vshiftu: 6452 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 6453 VShiftOpc = ARMISD::VSHL; 6454 break; 6455 } 6456 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 6457 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
6458 ARMISD::VSHRs : ARMISD::VSHRu); 6459 break; 6460 } 6461 return SDValue(); 6462 6463 case Intrinsic::arm_neon_vshiftls: 6464 case Intrinsic::arm_neon_vshiftlu: 6465 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 6466 break; 6467 llvm_unreachable("invalid shift count for vshll intrinsic"); 6468 6469 case Intrinsic::arm_neon_vrshifts: 6470 case Intrinsic::arm_neon_vrshiftu: 6471 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 6472 break; 6473 return SDValue(); 6474 6475 case Intrinsic::arm_neon_vqshifts: 6476 case Intrinsic::arm_neon_vqshiftu: 6477 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6478 break; 6479 return SDValue(); 6480 6481 case Intrinsic::arm_neon_vqshiftsu: 6482 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6483 break; 6484 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 6485 6486 case Intrinsic::arm_neon_vshiftn: 6487 case Intrinsic::arm_neon_vrshiftn: 6488 case Intrinsic::arm_neon_vqshiftns: 6489 case Intrinsic::arm_neon_vqshiftnu: 6490 case Intrinsic::arm_neon_vqshiftnsu: 6491 case Intrinsic::arm_neon_vqrshiftns: 6492 case Intrinsic::arm_neon_vqrshiftnu: 6493 case Intrinsic::arm_neon_vqrshiftnsu: 6494 // Narrowing shifts require an immediate right shift. 6495 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 6496 break; 6497 llvm_unreachable("invalid shift count for narrowing vector shift " 6498 "intrinsic"); 6499 6500 default: 6501 llvm_unreachable("unhandled vector shift"); 6502 } 6503 6504 switch (IntNo) { 6505 case Intrinsic::arm_neon_vshifts: 6506 case Intrinsic::arm_neon_vshiftu: 6507 // Opcode already set above. 6508 break; 6509 case Intrinsic::arm_neon_vshiftls: 6510 case Intrinsic::arm_neon_vshiftlu: 6511 if (Cnt == VT.getVectorElementType().getSizeInBits()) 6512 VShiftOpc = ARMISD::VSHLLi; 6513 else 6514 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
6515 ARMISD::VSHLLs : ARMISD::VSHLLu); 6516 break; 6517 case Intrinsic::arm_neon_vshiftn: 6518 VShiftOpc = ARMISD::VSHRN; break; 6519 case Intrinsic::arm_neon_vrshifts: 6520 VShiftOpc = ARMISD::VRSHRs; break; 6521 case Intrinsic::arm_neon_vrshiftu: 6522 VShiftOpc = ARMISD::VRSHRu; break; 6523 case Intrinsic::arm_neon_vrshiftn: 6524 VShiftOpc = ARMISD::VRSHRN; break; 6525 case Intrinsic::arm_neon_vqshifts: 6526 VShiftOpc = ARMISD::VQSHLs; break; 6527 case Intrinsic::arm_neon_vqshiftu: 6528 VShiftOpc = ARMISD::VQSHLu; break; 6529 case Intrinsic::arm_neon_vqshiftsu: 6530 VShiftOpc = ARMISD::VQSHLsu; break; 6531 case Intrinsic::arm_neon_vqshiftns: 6532 VShiftOpc = ARMISD::VQSHRNs; break; 6533 case Intrinsic::arm_neon_vqshiftnu: 6534 VShiftOpc = ARMISD::VQSHRNu; break; 6535 case Intrinsic::arm_neon_vqshiftnsu: 6536 VShiftOpc = ARMISD::VQSHRNsu; break; 6537 case Intrinsic::arm_neon_vqrshiftns: 6538 VShiftOpc = ARMISD::VQRSHRNs; break; 6539 case Intrinsic::arm_neon_vqrshiftnu: 6540 VShiftOpc = ARMISD::VQRSHRNu; break; 6541 case Intrinsic::arm_neon_vqrshiftnsu: 6542 VShiftOpc = ARMISD::VQRSHRNsu; break; 6543 } 6544 6545 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6546 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 6547 } 6548 6549 case Intrinsic::arm_neon_vshiftins: { 6550 EVT VT = N->getOperand(1).getValueType(); 6551 int64_t Cnt; 6552 unsigned VShiftOpc = 0; 6553 6554 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 6555 VShiftOpc = ARMISD::VSLI; 6556 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 6557 VShiftOpc = ARMISD::VSRI; 6558 else { 6559 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 6560 } 6561 6562 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6563 N->getOperand(1), N->getOperand(2), 6564 DAG.getConstant(Cnt, MVT::i32)); 6565 } 6566 6567 case Intrinsic::arm_neon_vqrshifts: 6568 case Intrinsic::arm_neon_vqrshiftu: 6569 // No immediate versions of these to check for. 6570 break; 6571 } 6572 6573 return SDValue(); 6574} 6575 6576/// PerformShiftCombine - Checks for immediate versions of vector shifts and 6577/// lowers them. As with the vector shift intrinsics, this is done during DAG 6578/// combining instead of DAG legalizing because the build_vectors for 64-bit 6579/// vector element shift counts are generally not legal, and it is hard to see 6580/// their values after they get legalized to loads from a constant pool. 6581static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 6582 const ARMSubtarget *ST) { 6583 EVT VT = N->getValueType(0); 6584 6585 // Nothing to be done for scalar shifts. 6586 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6587 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 6588 return SDValue(); 6589 6590 assert(ST->hasNEON() && "unexpected vector shift"); 6591 int64_t Cnt; 6592 6593 switch (N->getOpcode()) { 6594 default: llvm_unreachable("unexpected shift opcode"); 6595 6596 case ISD::SHL: 6597 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 6598 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 6599 DAG.getConstant(Cnt, MVT::i32)); 6600 break; 6601 6602 case ISD::SRA: 6603 case ISD::SRL: 6604 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 6605 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
6606 ARMISD::VSHRs : ARMISD::VSHRu); 6607 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 6608 DAG.getConstant(Cnt, MVT::i32)); 6609 } 6610 } 6611 return SDValue(); 6612} 6613 6614/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 6615/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 6616static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 6617 const ARMSubtarget *ST) { 6618 SDValue N0 = N->getOperand(0); 6619 6620 // Check for sign- and zero-extensions of vector extract operations of 8- 6621 // and 16-bit vector elements. NEON supports these directly. They are 6622 // handled during DAG combining because type legalization will promote them 6623 // to 32-bit types and it is messy to recognize the operations after that. 6624 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6625 SDValue Vec = N0.getOperand(0); 6626 SDValue Lane = N0.getOperand(1); 6627 EVT VT = N->getValueType(0); 6628 EVT EltVT = N0.getValueType(); 6629 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6630 6631 if (VT == MVT::i32 && 6632 (EltVT == MVT::i8 || EltVT == MVT::i16) && 6633 TLI.isTypeLegal(Vec.getValueType()) && 6634 isa<ConstantSDNode>(Lane)) { 6635 6636 unsigned Opc = 0; 6637 switch (N->getOpcode()) { 6638 default: llvm_unreachable("unexpected opcode"); 6639 case ISD::SIGN_EXTEND: 6640 Opc = ARMISD::VGETLANEs; 6641 break; 6642 case ISD::ZERO_EXTEND: 6643 case ISD::ANY_EXTEND: 6644 Opc = ARMISD::VGETLANEu; 6645 break; 6646 } 6647 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 6648 } 6649 } 6650 6651 return SDValue(); 6652} 6653 6654/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 6655/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 6656static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 6657 const ARMSubtarget *ST) { 6658 // If the target supports NEON, try to use vmax/vmin instructions for f32 6659 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 6660 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 6661 // a NaN; only do the transformation when it matches that behavior. 6662 6663 // For now only do this when using NEON for FP operations; if using VFP, it 6664 // is not obvious that the benefit outweighs the cost of switching to the 6665 // NEON pipeline. 6666 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 6667 N->getValueType(0) != MVT::f32) 6668 return SDValue(); 6669 6670 SDValue CondLHS = N->getOperand(0); 6671 SDValue CondRHS = N->getOperand(1); 6672 SDValue LHS = N->getOperand(2); 6673 SDValue RHS = N->getOperand(3); 6674 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 6675 6676 unsigned Opcode = 0; 6677 bool IsReversed; 6678 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 6679 IsReversed = false; // x CC y ? x : y 6680 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 6681 IsReversed = true ; // x CC y ? y : x 6682 } else { 6683 return SDValue(); 6684 } 6685 6686 bool IsUnordered; 6687 switch (CC) { 6688 default: break; 6689 case ISD::SETOLT: 6690 case ISD::SETOLE: 6691 case ISD::SETLT: 6692 case ISD::SETLE: 6693 case ISD::SETULT: 6694 case ISD::SETULE: 6695 // If LHS is NaN, an ordered comparison will be false and the result will 6696 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 6697 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
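    // For example, for "x < y ? x : y" with x = NaN and y = 1.0f, the select
    // yields 1.0f, but vmin(NaN, 1.0f) yields NaN, so the fold is only safe
    // when x is known not to be a NaN.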
6698 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 6699 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6700 break; 6701 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 6702 // will return -0, so vmin can only be used for unsafe math or if one of 6703 // the operands is known to be nonzero. 6704 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 6705 !UnsafeFPMath && 6706 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6707 break; 6708 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 6709 break; 6710 6711 case ISD::SETOGT: 6712 case ISD::SETOGE: 6713 case ISD::SETGT: 6714 case ISD::SETGE: 6715 case ISD::SETUGT: 6716 case ISD::SETUGE: 6717 // If LHS is NaN, an ordered comparison will be false and the result will 6718 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 6719 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 6720 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 6721 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6722 break; 6723 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 6724 // will return +0, so vmax can only be used for unsafe math or if one of 6725 // the operands is known to be nonzero. 6726 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 6727 !UnsafeFPMath && 6728 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6729 break; 6730 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 6731 break; 6732 } 6733 6734 if (!Opcode) 6735 return SDValue(); 6736 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 6737} 6738 6739SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 6740 DAGCombinerInfo &DCI) const { 6741 switch (N->getOpcode()) { 6742 default: break; 6743 case ISD::ADD: return PerformADDCombine(N, DCI); 6744 case ISD::SUB: return PerformSUBCombine(N, DCI); 6745 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 6746 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 6747 case ISD::AND: return PerformANDCombine(N, DCI); 6748 case ARMISD::BFI: return PerformBFICombine(N, DCI); 6749 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 6750 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 6751 case ISD::STORE: return PerformSTORECombine(N, DCI); 6752 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 6753 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 6754 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 6755 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 6756 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 6757 case ISD::SHL: 6758 case ISD::SRA: 6759 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 6760 case ISD::SIGN_EXTEND: 6761 case ISD::ZERO_EXTEND: 6762 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 6763 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 6764 case ARMISD::VLD2DUP: 6765 case ARMISD::VLD3DUP: 6766 case ARMISD::VLD4DUP: 6767 return CombineBaseUpdate(N, DCI); 6768 case ISD::INTRINSIC_VOID: 6769 case ISD::INTRINSIC_W_CHAIN: 6770 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 6771 case Intrinsic::arm_neon_vld1: 6772 case Intrinsic::arm_neon_vld2: 6773 case Intrinsic::arm_neon_vld3: 6774 case Intrinsic::arm_neon_vld4: 6775 case Intrinsic::arm_neon_vld2lane: 6776 case Intrinsic::arm_neon_vld3lane: 6777 case 
Intrinsic::arm_neon_vld4lane: 6778 case Intrinsic::arm_neon_vst1: 6779 case Intrinsic::arm_neon_vst2: 6780 case Intrinsic::arm_neon_vst3: 6781 case Intrinsic::arm_neon_vst4: 6782 case Intrinsic::arm_neon_vst2lane: 6783 case Intrinsic::arm_neon_vst3lane: 6784 case Intrinsic::arm_neon_vst4lane: 6785 return CombineBaseUpdate(N, DCI); 6786 default: break; 6787 } 6788 break; 6789 } 6790 return SDValue(); 6791} 6792 6793bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 6794 EVT VT) const { 6795 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 6796} 6797 6798bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 6799 if (!Subtarget->allowsUnalignedMem()) 6800 return false; 6801 6802 switch (VT.getSimpleVT().SimpleTy) { 6803 default: 6804 return false; 6805 case MVT::i8: 6806 case MVT::i16: 6807 case MVT::i32: 6808 return true; 6809 // FIXME: VLD1 etc with standard alignment is legal. 6810 } 6811} 6812 6813static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 6814 if (V < 0) 6815 return false; 6816 6817 unsigned Scale = 1; 6818 switch (VT.getSimpleVT().SimpleTy) { 6819 default: return false; 6820 case MVT::i1: 6821 case MVT::i8: 6822 // Scale == 1; 6823 break; 6824 case MVT::i16: 6825 // Scale == 2; 6826 Scale = 2; 6827 break; 6828 case MVT::i32: 6829 // Scale == 4; 6830 Scale = 4; 6831 break; 6832 } 6833 6834 if ((V & (Scale - 1)) != 0) 6835 return false; 6836 V /= Scale; 6837 return V == (V & ((1LL << 5) - 1)); 6838} 6839 6840static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 6841 const ARMSubtarget *Subtarget) { 6842 bool isNeg = false; 6843 if (V < 0) { 6844 isNeg = true; 6845 V = - V; 6846 } 6847 6848 switch (VT.getSimpleVT().SimpleTy) { 6849 default: return false; 6850 case MVT::i1: 6851 case MVT::i8: 6852 case MVT::i16: 6853 case MVT::i32: 6854 // + imm12 or - imm8 6855 if (isNeg) 6856 return V == (V & ((1LL << 8) - 1)); 6857 return V == (V & ((1LL << 12) - 1)); 6858 case MVT::f32: 6859 case MVT::f64: 6860 // Same as ARM mode. FIXME: NEON? 6861 if (!Subtarget->hasVFP2()) 6862 return false; 6863 if ((V & 3) != 0) 6864 return false; 6865 V >>= 2; 6866 return V == (V & ((1LL << 8) - 1)); 6867 } 6868} 6869 6870/// isLegalAddressImmediate - Return true if the integer value can be used 6871/// as the offset of the target addressing mode for load / store of the 6872/// given type. 6873static bool isLegalAddressImmediate(int64_t V, EVT VT, 6874 const ARMSubtarget *Subtarget) { 6875 if (V == 0) 6876 return true; 6877 6878 if (!VT.isSimple()) 6879 return false; 6880 6881 if (Subtarget->isThumb1Only()) 6882 return isLegalT1AddressImmediate(V, VT); 6883 else if (Subtarget->isThumb2()) 6884 return isLegalT2AddressImmediate(V, VT, Subtarget); 6885 6886 // ARM mode. 6887 if (V < 0) 6888 V = - V; 6889 switch (VT.getSimpleVT().SimpleTy) { 6890 default: return false; 6891 case MVT::i1: 6892 case MVT::i8: 6893 case MVT::i32: 6894 // +- imm12 6895 return V == (V & ((1LL << 12) - 1)); 6896 case MVT::i16: 6897 // +- imm8 6898 return V == (V & ((1LL << 8) - 1)); 6899 case MVT::f32: 6900 case MVT::f64: 6901 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
6902 return false; 6903 if ((V & 3) != 0) 6904 return false; 6905 V >>= 2; 6906 return V == (V & ((1LL << 8) - 1)); 6907 } 6908} 6909 6910bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 6911 EVT VT) const { 6912 int Scale = AM.Scale; 6913 if (Scale < 0) 6914 return false; 6915 6916 switch (VT.getSimpleVT().SimpleTy) { 6917 default: return false; 6918 case MVT::i1: 6919 case MVT::i8: 6920 case MVT::i16: 6921 case MVT::i32: 6922 if (Scale == 1) 6923 return true; 6924 // r + r << imm 6925 Scale = Scale & ~1; 6926 return Scale == 2 || Scale == 4 || Scale == 8; 6927 case MVT::i64: 6928 // r + r 6929 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6930 return true; 6931 return false; 6932 case MVT::isVoid: 6933 // Note, we allow "void" uses (basically, uses that aren't loads or 6934 // stores), because arm allows folding a scale into many arithmetic 6935 // operations. This should be made more precise and revisited later. 6936 6937 // Allow r << imm, but the imm has to be a multiple of two. 6938 if (Scale & 1) return false; 6939 return isPowerOf2_32(Scale); 6940 } 6941} 6942 6943/// isLegalAddressingMode - Return true if the addressing mode represented 6944/// by AM is legal for this target, for a load/store of the specified type. 6945bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 6946 const Type *Ty) const { 6947 EVT VT = getValueType(Ty, true); 6948 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 6949 return false; 6950 6951 // Can never fold addr of global into load/store. 6952 if (AM.BaseGV) 6953 return false; 6954 6955 switch (AM.Scale) { 6956 case 0: // no scale reg, must be "r+i" or "r", or "i". 6957 break; 6958 case 1: 6959 if (Subtarget->isThumb1Only()) 6960 return false; 6961 // FALL THROUGH. 6962 default: 6963 // ARM doesn't support any R+R*scale+imm addr modes. 6964 if (AM.BaseOffs) 6965 return false; 6966 6967 if (!VT.isSimple()) 6968 return false; 6969 6970 if (Subtarget->isThumb2()) 6971 return isLegalT2ScaledAddressingMode(AM, VT); 6972 6973 int Scale = AM.Scale; 6974 switch (VT.getSimpleVT().SimpleTy) { 6975 default: return false; 6976 case MVT::i1: 6977 case MVT::i8: 6978 case MVT::i32: 6979 if (Scale < 0) Scale = -Scale; 6980 if (Scale == 1) 6981 return true; 6982 // r + r << imm 6983 return isPowerOf2_32(Scale & ~1); 6984 case MVT::i16: 6985 case MVT::i64: 6986 // r + r 6987 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6988 return true; 6989 return false; 6990 6991 case MVT::isVoid: 6992 // Note, we allow "void" uses (basically, uses that aren't loads or 6993 // stores), because arm allows folding a scale into many arithmetic 6994 // operations. This should be made more precise and revisited later. 6995 6996 // Allow r << imm, but the imm has to be a multiple of two. 6997 if (Scale & 1) return false; 6998 return isPowerOf2_32(Scale); 6999 } 7000 break; 7001 } 7002 return true; 7003} 7004 7005/// isLegalICmpImmediate - Return true if the specified immediate is legal 7006/// icmp immediate, that is the target has icmp instructions which can compare 7007/// a register against the immediate without having to materialize the 7008/// immediate into a register. 
7009bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 7010 if (!Subtarget->isThumb()) 7011 return ARM_AM::getSOImmVal(Imm) != -1; 7012 if (Subtarget->isThumb2()) 7013 return ARM_AM::getT2SOImmVal(Imm) != -1; 7014 return Imm >= 0 && Imm <= 255; 7015} 7016 7017/// isLegalAddImmediate - Return true if the specified immediate is legal 7018/// add immediate, that is the target has add instructions which can add 7019/// a register with the immediate without having to materialize the 7020/// immediate into a register. 7021bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 7022 return ARM_AM::getSOImmVal(Imm) != -1; 7023} 7024 7025static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 7026 bool isSEXTLoad, SDValue &Base, 7027 SDValue &Offset, bool &isInc, 7028 SelectionDAG &DAG) { 7029 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 7030 return false; 7031 7032 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 7033 // AddressingMode 3 7034 Base = Ptr->getOperand(0); 7035 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7036 int RHSC = (int)RHS->getZExtValue(); 7037 if (RHSC < 0 && RHSC > -256) { 7038 assert(Ptr->getOpcode() == ISD::ADD); 7039 isInc = false; 7040 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7041 return true; 7042 } 7043 } 7044 isInc = (Ptr->getOpcode() == ISD::ADD); 7045 Offset = Ptr->getOperand(1); 7046 return true; 7047 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 7048 // AddressingMode 2 7049 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7050 int RHSC = (int)RHS->getZExtValue(); 7051 if (RHSC < 0 && RHSC > -0x1000) { 7052 assert(Ptr->getOpcode() == ISD::ADD); 7053 isInc = false; 7054 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7055 Base = Ptr->getOperand(0); 7056 return true; 7057 } 7058 } 7059 7060 if (Ptr->getOpcode() == ISD::ADD) { 7061 isInc = true; 7062 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 7063 if (ShOpcVal != ARM_AM::no_shift) { 7064 Base = Ptr->getOperand(1); 7065 Offset = Ptr->getOperand(0); 7066 } else { 7067 Base = Ptr->getOperand(0); 7068 Offset = Ptr->getOperand(1); 7069 } 7070 return true; 7071 } 7072 7073 isInc = (Ptr->getOpcode() == ISD::ADD); 7074 Base = Ptr->getOperand(0); 7075 Offset = Ptr->getOperand(1); 7076 return true; 7077 } 7078 7079 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 7080 return false; 7081} 7082 7083static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 7084 bool isSEXTLoad, SDValue &Base, 7085 SDValue &Offset, bool &isInc, 7086 SelectionDAG &DAG) { 7087 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 7088 return false; 7089 7090 Base = Ptr->getOperand(0); 7091 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7092 int RHSC = (int)RHS->getZExtValue(); 7093 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 7094 assert(Ptr->getOpcode() == ISD::ADD); 7095 isInc = false; 7096 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7097 return true; 7098 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 
7099 isInc = Ptr->getOpcode() == ISD::ADD; 7100 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 7101 return true; 7102 } 7103 } 7104 7105 return false; 7106} 7107 7108/// getPreIndexedAddressParts - returns true by value, base pointer and 7109/// offset pointer and addressing mode by reference if the node's address 7110/// can be legally represented as pre-indexed load / store address. 7111bool 7112ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 7113 SDValue &Offset, 7114 ISD::MemIndexedMode &AM, 7115 SelectionDAG &DAG) const { 7116 if (Subtarget->isThumb1Only()) 7117 return false; 7118 7119 EVT VT; 7120 SDValue Ptr; 7121 bool isSEXTLoad = false; 7122 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7123 Ptr = LD->getBasePtr(); 7124 VT = LD->getMemoryVT(); 7125 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7126 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7127 Ptr = ST->getBasePtr(); 7128 VT = ST->getMemoryVT(); 7129 } else 7130 return false; 7131 7132 bool isInc; 7133 bool isLegal = false; 7134 if (Subtarget->isThumb2()) 7135 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7136 Offset, isInc, DAG); 7137 else 7138 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7139 Offset, isInc, DAG); 7140 if (!isLegal) 7141 return false; 7142 7143 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 7144 return true; 7145} 7146 7147/// getPostIndexedAddressParts - returns true by value, base pointer and 7148/// offset pointer and addressing mode by reference if this node can be 7149/// combined with a load / store to form a post-indexed load / store. 7150bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 7151 SDValue &Base, 7152 SDValue &Offset, 7153 ISD::MemIndexedMode &AM, 7154 SelectionDAG &DAG) const { 7155 if (Subtarget->isThumb1Only()) 7156 return false; 7157 7158 EVT VT; 7159 SDValue Ptr; 7160 bool isSEXTLoad = false; 7161 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7162 VT = LD->getMemoryVT(); 7163 Ptr = LD->getBasePtr(); 7164 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7165 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7166 VT = ST->getMemoryVT(); 7167 Ptr = ST->getBasePtr(); 7168 } else 7169 return false; 7170 7171 bool isInc; 7172 bool isLegal = false; 7173 if (Subtarget->isThumb2()) 7174 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7175 isInc, DAG); 7176 else 7177 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7178 isInc, DAG); 7179 if (!isLegal) 7180 return false; 7181 7182 if (Ptr != Base) { 7183 // Swap base ptr and offset to catch more post-index load / store when 7184 // it's legal. In Thumb2 mode, offset must be an immediate. 7185 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 7186 !Subtarget->isThumb2()) 7187 std::swap(Base, Offset); 7188 7189 // Post-indexed load / store update the base pointer. 7190 if (Ptr != Base) 7191 return false; 7192 } 7193 7194 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 7195 return true; 7196} 7197 7198void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 7199 const APInt &Mask, 7200 APInt &KnownZero, 7201 APInt &KnownOne, 7202 const SelectionDAG &DAG, 7203 unsigned Depth) const { 7204 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 7205 switch (Op.getOpcode()) { 7206 default: break; 7207 case ARMISD::CMOV: { 7208 // Bits are known zero/one if known on the LHS and RHS. 
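    // For example, if one incoming value has KnownZero = 0xFFFFFF00 and the
    // other has KnownZero = 0xFFFF0000, only the bits known on both sides
    // survive, so the CMOV's KnownZero becomes 0xFFFF0000 (KnownOne is
    // intersected the same way).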
7209 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 7210 if (KnownZero == 0 && KnownOne == 0) return; 7211 7212 APInt KnownZeroRHS, KnownOneRHS; 7213 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 7214 KnownZeroRHS, KnownOneRHS, Depth+1); 7215 KnownZero &= KnownZeroRHS; 7216 KnownOne &= KnownOneRHS; 7217 return; 7218 } 7219 } 7220} 7221 7222//===----------------------------------------------------------------------===// 7223// ARM Inline Assembly Support 7224//===----------------------------------------------------------------------===// 7225 7226bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 7227 // Looking for "rev" which is V6+. 7228 if (!Subtarget->hasV6Ops()) 7229 return false; 7230 7231 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 7232 std::string AsmStr = IA->getAsmString(); 7233 SmallVector<StringRef, 4> AsmPieces; 7234 SplitString(AsmStr, AsmPieces, ";\n"); 7235 7236 switch (AsmPieces.size()) { 7237 default: return false; 7238 case 1: 7239 AsmStr = AsmPieces[0]; 7240 AsmPieces.clear(); 7241 SplitString(AsmStr, AsmPieces, " \t,"); 7242 7243 // rev $0, $1 7244 if (AsmPieces.size() == 3 && 7245 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 7246 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 7247 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 7248 if (Ty && Ty->getBitWidth() == 32) 7249 return IntrinsicLowering::LowerToByteSwap(CI); 7250 } 7251 break; 7252 } 7253 7254 return false; 7255} 7256 7257/// getConstraintType - Given a constraint letter, return the type of 7258/// constraint it is for this target. 7259ARMTargetLowering::ConstraintType 7260ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 7261 if (Constraint.size() == 1) { 7262 switch (Constraint[0]) { 7263 default: break; 7264 case 'l': return C_RegisterClass; 7265 case 'w': return C_RegisterClass; 7266 } 7267 } 7268 return TargetLowering::getConstraintType(Constraint); 7269} 7270 7271/// Examine constraint type and operand type and determine a weight value. 7272/// This object must already have been set up with the operand type 7273/// and the current alternative constraint selected. 7274TargetLowering::ConstraintWeight 7275ARMTargetLowering::getSingleConstraintMatchWeight( 7276 AsmOperandInfo &info, const char *constraint) const { 7277 ConstraintWeight weight = CW_Invalid; 7278 Value *CallOperandVal = info.CallOperandVal; 7279 // If we don't have a value, we can't do a match, 7280 // but allow it at the lowest weight. 7281 if (CallOperandVal == NULL) 7282 return CW_Default; 7283 const Type *type = CallOperandVal->getType(); 7284 // Look at the constraint type. 
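  // For example, an integer operand tied to the 'l' constraint (something
  // like a hypothetical asm("add %0, %0, #1" : "+l"(x))) is weighted as
  // CW_SpecificReg on Thumb targets, where 'l' names only r0-r7, and as an
  // ordinary register-class ('r'-like) weight elsewhere.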
7285 switch (*constraint) { 7286 default: 7287 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 7288 break; 7289 case 'l': 7290 if (type->isIntegerTy()) { 7291 if (Subtarget->isThumb()) 7292 weight = CW_SpecificReg; 7293 else 7294 weight = CW_Register; 7295 } 7296 break; 7297 case 'w': 7298 if (type->isFloatingPointTy()) 7299 weight = CW_Register; 7300 break; 7301 } 7302 return weight; 7303} 7304 7305std::pair<unsigned, const TargetRegisterClass*> 7306ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 7307 EVT VT) const { 7308 if (Constraint.size() == 1) { 7309 // GCC ARM Constraint Letters 7310 switch (Constraint[0]) { 7311 case 'l': 7312 if (Subtarget->isThumb()) 7313 return std::make_pair(0U, ARM::tGPRRegisterClass); 7314 else 7315 return std::make_pair(0U, ARM::GPRRegisterClass); 7316 case 'r': 7317 return std::make_pair(0U, ARM::GPRRegisterClass); 7318 case 'w': 7319 if (VT == MVT::f32) 7320 return std::make_pair(0U, ARM::SPRRegisterClass); 7321 if (VT.getSizeInBits() == 64) 7322 return std::make_pair(0U, ARM::DPRRegisterClass); 7323 if (VT.getSizeInBits() == 128) 7324 return std::make_pair(0U, ARM::QPRRegisterClass); 7325 break; 7326 } 7327 } 7328 if (StringRef("{cc}").equals_lower(Constraint)) 7329 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 7330 7331 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 7332} 7333 7334std::vector<unsigned> ARMTargetLowering:: 7335getRegClassForInlineAsmConstraint(const std::string &Constraint, 7336 EVT VT) const { 7337 if (Constraint.size() != 1) 7338 return std::vector<unsigned>(); 7339 7340 switch (Constraint[0]) { // GCC ARM Constraint Letters 7341 default: break; 7342 case 'l': 7343 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 7344 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 7345 0); 7346 case 'r': 7347 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 7348 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 7349 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 7350 ARM::R12, ARM::LR, 0); 7351 case 'w': 7352 if (VT == MVT::f32) 7353 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 7354 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 7355 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 7356 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 7357 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 7358 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 7359 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 7360 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 7361 if (VT.getSizeInBits() == 64) 7362 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 7363 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 7364 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 7365 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 7366 if (VT.getSizeInBits() == 128) 7367 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 7368 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 7369 break; 7370 } 7371 7372 return std::vector<unsigned>(); 7373} 7374 7375/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 7376/// vector. If it is invalid, don't add anything to Ops. 
7377void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 7378 char Constraint, 7379 std::vector<SDValue>&Ops, 7380 SelectionDAG &DAG) const { 7381 SDValue Result(0, 0); 7382 7383 switch (Constraint) { 7384 default: break; 7385 case 'I': case 'J': case 'K': case 'L': 7386 case 'M': case 'N': case 'O': 7387 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 7388 if (!C) 7389 return; 7390 7391 int64_t CVal64 = C->getSExtValue(); 7392 int CVal = (int) CVal64; 7393 // None of these constraints allow values larger than 32 bits. Check 7394 // that the value fits in an int. 7395 if (CVal != CVal64) 7396 return; 7397 7398 switch (Constraint) { 7399 case 'I': 7400 if (Subtarget->isThumb1Only()) { 7401 // This must be a constant between 0 and 255, for ADD 7402 // immediates. 7403 if (CVal >= 0 && CVal <= 255) 7404 break; 7405 } else if (Subtarget->isThumb2()) { 7406 // A constant that can be used as an immediate value in a 7407 // data-processing instruction. 7408 if (ARM_AM::getT2SOImmVal(CVal) != -1) 7409 break; 7410 } else { 7411 // A constant that can be used as an immediate value in a 7412 // data-processing instruction. 7413 if (ARM_AM::getSOImmVal(CVal) != -1) 7414 break; 7415 } 7416 return; 7417 7418 case 'J': 7419 if (Subtarget->isThumb()) { // FIXME thumb2 7420 // This must be a constant between -255 and -1, for negated ADD 7421 // immediates. This can be used in GCC with an "n" modifier that 7422 // prints the negated value, for use with SUB instructions. It is 7423 // not useful otherwise but is implemented for compatibility. 7424 if (CVal >= -255 && CVal <= -1) 7425 break; 7426 } else { 7427 // This must be a constant between -4095 and 4095. It is not clear 7428 // what this constraint is intended for. Implemented for 7429 // compatibility with GCC. 7430 if (CVal >= -4095 && CVal <= 4095) 7431 break; 7432 } 7433 return; 7434 7435 case 'K': 7436 if (Subtarget->isThumb1Only()) { 7437 // A 32-bit value where only one byte has a nonzero value. Exclude 7438 // zero to match GCC. This constraint is used by GCC internally for 7439 // constants that can be loaded with a move/shift combination. 7440 // It is not useful otherwise but is implemented for compatibility. 7441 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 7442 break; 7443 } else if (Subtarget->isThumb2()) { 7444 // A constant whose bitwise inverse can be used as an immediate 7445 // value in a data-processing instruction. This can be used in GCC 7446 // with a "B" modifier that prints the inverted value, for use with 7447 // BIC and MVN instructions. It is not useful otherwise but is 7448 // implemented for compatibility. 7449 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 7450 break; 7451 } else { 7452 // A constant whose bitwise inverse can be used as an immediate 7453 // value in a data-processing instruction. This can be used in GCC 7454 // with a "B" modifier that prints the inverted value, for use with 7455 // BIC and MVN instructions. It is not useful otherwise but is 7456 // implemented for compatibility. 7457 if (ARM_AM::getSOImmVal(~CVal) != -1) 7458 break; 7459 } 7460 return; 7461 7462 case 'L': 7463 if (Subtarget->isThumb1Only()) { 7464 // This must be a constant between -7 and 7, 7465 // for 3-operand ADD/SUB immediate instructions. 7466 if (CVal >= -7 && CVal < 7) 7467 break; 7468 } else if (Subtarget->isThumb2()) { 7469 // A constant whose negation can be used as an immediate value in a 7470 // data-processing instruction. 
This can be used in GCC with an "n" 7471 // modifier that prints the negated value, for use with SUB 7472 // instructions. It is not useful otherwise but is implemented for 7473 // compatibility. 7474 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 7475 break; 7476 } else { 7477 // A constant whose negation can be used as an immediate value in a 7478 // data-processing instruction. This can be used in GCC with an "n" 7479 // modifier that prints the negated value, for use with SUB 7480 // instructions. It is not useful otherwise but is implemented for 7481 // compatibility. 7482 if (ARM_AM::getSOImmVal(-CVal) != -1) 7483 break; 7484 } 7485 return; 7486 7487 case 'M': 7488 if (Subtarget->isThumb()) { // FIXME thumb2 7489 // This must be a multiple of 4 between 0 and 1020, for 7490 // ADD sp + immediate. 7491 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 7492 break; 7493 } else { 7494 // A power of two or a constant between 0 and 32. This is used in 7495 // GCC for the shift amount on shifted register operands, but it is 7496 // useful in general for any shift amounts. 7497 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 7498 break; 7499 } 7500 return; 7501 7502 case 'N': 7503 if (Subtarget->isThumb()) { // FIXME thumb2 7504 // This must be a constant between 0 and 31, for shift amounts. 7505 if (CVal >= 0 && CVal <= 31) 7506 break; 7507 } 7508 return; 7509 7510 case 'O': 7511 if (Subtarget->isThumb()) { // FIXME thumb2 7512 // This must be a multiple of 4 between -508 and 508, for 7513 // ADD/SUB sp = sp + immediate. 7514 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 7515 break; 7516 } 7517 return; 7518 } 7519 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 7520 break; 7521 } 7522 7523 if (Result.getNode()) { 7524 Ops.push_back(Result); 7525 return; 7526 } 7527 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 7528} 7529 7530bool 7531ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 7532 // The ARM target isn't yet aware of offsets. 7533 return false; 7534} 7535 7536int ARM::getVFPf32Imm(const APFloat &FPImm) { 7537 APInt Imm = FPImm.bitcastToAPInt(); 7538 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; 7539 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 7540 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits 7541 7542 // We can handle 4 bits of mantissa. 7543 // mantissa = (16+UInt(e:f:g:h))/16. 7544 if (Mantissa & 0x7ffff) 7545 return -1; 7546 Mantissa >>= 19; 7547 if ((Mantissa & 0xf) != Mantissa) 7548 return -1; 7549 7550 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 7551 if (Exp < -3 || Exp > 4) 7552 return -1; 7553 Exp = ((Exp+3) & 0x7) ^ 4; 7554 7555 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 7556} 7557 7558int ARM::getVFPf64Imm(const APFloat &FPImm) { 7559 APInt Imm = FPImm.bitcastToAPInt(); 7560 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; 7561 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 7562 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL; 7563 7564 // We can handle 4 bits of mantissa. 7565 // mantissa = (16+UInt(e:f:g:h))/16. 
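  // For example, 0.5 (1.0 * 2^-1) has e:f:g:h == 0000 and an exponent of -1,
  // so it survives the checks below and is encodable, while a value such as
  // 0.3 is not (rough illustration only).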
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return 0;
  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return 0;
  }
  return 1;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
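    // Example (illustrative, not from the original source): an
    // arm_neon_vst4 call storing four <4 x i32> vectors makes the loop below
    // sum 4 * 16 bytes, so NumElts = 8 and memVT becomes v8i64 -- a
    // conservative cover of the 64 bytes actually written.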
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
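
// A minimal illustrative sketch (not part of the original file): the inverse
// of the ARM::getVFPf32Imm/getVFPf64Imm encodings above.  The 8-bit VFP
// immediate abcdefgh represents
//   (-1)^a * 2^(UInt(NOT(b):c:d) - 3) * (16 + UInt(e:f:g:h)) / 16,
// so 1.0 encodes as 0x70, 0.5 as 0x60 and -2.0 as 0x80.  The helper name is
// hypothetical and is shown only to make the bit manipulation concrete.
static inline double DecodeVFPImm8Sketch(unsigned Imm8) {
  unsigned Sign = (Imm8 >> 7) & 1;
  int Exp = (((Imm8 >> 4) & 0x7) ^ 4) - 3;    // undo Exp = ((Exp+3) & 0x7) ^ 4
  double Val = (16.0 + (Imm8 & 0xf)) / 16.0;  // mantissa = (16+UInt(e:f:g:h))/16
  for (int i = 0; i < Exp; ++i) Val *= 2.0;   // Exp is restricted to [-3, 4],
  for (int i = 0; i > Exp; --i) Val /= 2.0;   // so scale by 2 step by step
  return Sign ? -Val : Val;
}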