ARMISelLowering.cpp revision 4815d56bb2c356a610f46753c5f1cefafa113b21
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
                   cl::desc("Generate tail calls (TEMPORARY OPTION)."),
                   cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
                   cl::desc("Generate calls via indirect call instructions"),
                   cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
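      // Note: the *vfp comparison helpers return a nonzero value when their
      // condition holds, so the libcall result is tested against zero with
      // the condition codes registered via setCmpLibcallCC.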
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
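  // Registering a null name clears the libcall, so legalization expands these
  // 128-bit shifts inline instead of emitting a call.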
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
"__aeabi_fcmpun"); 330 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 331 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 332 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 333 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 334 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 335 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 341 342 // Floating-point to integer conversions. 343 // RTABI chapter 4.1.2, Table 6 344 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 345 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 346 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 347 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 348 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 349 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 350 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 351 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 352 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 353 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 354 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 355 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 356 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 357 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 358 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 359 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 360 361 // Conversions between floating types. 362 // RTABI chapter 4.1.2, Table 7 363 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 364 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 365 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 366 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 367 368 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
"ARMISD::VSHRu"; 874 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 875 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 876 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 877 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 878 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 879 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 880 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 881 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 882 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 883 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 884 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 885 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 886 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 887 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 888 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 889 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 890 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 891 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 892 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 893 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 894 case ARMISD::VDUP: return "ARMISD::VDUP"; 895 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 896 case ARMISD::VEXT: return "ARMISD::VEXT"; 897 case ARMISD::VREV64: return "ARMISD::VREV64"; 898 case ARMISD::VREV32: return "ARMISD::VREV32"; 899 case ARMISD::VREV16: return "ARMISD::VREV16"; 900 case ARMISD::VZIP: return "ARMISD::VZIP"; 901 case ARMISD::VUZP: return "ARMISD::VUZP"; 902 case ARMISD::VTRN: return "ARMISD::VTRN"; 903 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 904 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 905 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 906 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 907 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 908 case ARMISD::FMAX: return "ARMISD::FMAX"; 909 case ARMISD::FMIN: return "ARMISD::FMIN"; 910 case ARMISD::BFI: return "ARMISD::BFI"; 911 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 912 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 913 case ARMISD::VBSL: return "ARMISD::VBSL"; 914 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 915 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 916 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 917 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 918 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 919 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 920 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 921 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 922 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 923 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 924 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 925 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 926 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 927 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 928 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 929 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 930 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 931 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 932 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 933 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 934 } 935} 936 937EVT ARMTargetLowering::getSetCCResultType(EVT VT) const { 938 if (!VT.isVector()) return getPointerTy(); 939 return VT.changeVectorElementTypeToInteger(); 940} 941 942/// getRegClassFor - Return the register class that should be used for the 943/// specified value type. 
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
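/// Some unordered and ordered-not-equal conditions need two ARM condition
/// checks; the second is returned in CondCode2 (ARMCC::AL when unused).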
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
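      // Load the register-resident words of the byval (up to r3) and pass
      // them in registers; whatever remains is memcpy'd to the stack below.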
1319 if (CCInfo.isFirstByValRegValid()) { 1320 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1321 unsigned int i, j; 1322 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1323 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1324 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1325 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1326 MachinePointerInfo(), 1327 false, false, 0); 1328 MemOpChains.push_back(Load.getValue(1)); 1329 RegsToPass.push_back(std::make_pair(j, Load)); 1330 } 1331 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1332 CCInfo.clearFirstByValReg(); 1333 } 1334 1335 unsigned LocMemOffset = VA.getLocMemOffset(); 1336 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1337 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1338 StkPtrOff); 1339 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1340 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1341 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1342 MVT::i32); 1343 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1344 Flags.getByValAlign(), 1345 /*isVolatile=*/false, 1346 /*AlwaysInline=*/false, 1347 MachinePointerInfo(0), 1348 MachinePointerInfo(0))); 1349 1350 } else if (!IsSibCall) { 1351 assert(VA.isMemLoc()); 1352 1353 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1354 dl, DAG, VA, Flags)); 1355 } 1356 } 1357 1358 if (!MemOpChains.empty()) 1359 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1360 &MemOpChains[0], MemOpChains.size()); 1361 1362 // Build a sequence of copy-to-reg nodes chained together with token chain 1363 // and flag operands which copy the outgoing args into the appropriate regs. 1364 SDValue InFlag; 1365 // Tail call byval lowering might overwrite argument registers so in case of 1366 // tail call optimization the copies to registers are lowered later. 1367 if (!isTailCall) 1368 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1369 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1370 RegsToPass[i].second, InFlag); 1371 InFlag = Chain.getValue(1); 1372 } 1373 1374 // For tail calls lower the arguments to the 'real' stack slot. 1375 if (isTailCall) { 1376 // Force all the incoming stack arguments to be loaded from the stack 1377 // before any new outgoing arguments are stored to the stack, because the 1378 // outgoing stack slots may alias the incoming argument stack slots, and 1379 // the alias isn't otherwise explicit. This is slightly more conservative 1380 // than necessary, because it means that each store effectively depends 1381 // on every argument instead of just those arguments it would clobber. 1382 1383 // Do not flag preceding copytoreg stuff together with the following stuff. 1384 InFlag = SDValue(); 1385 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1386 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1387 RegsToPass[i].second, InFlag); 1388 InFlag = Chain.getValue(1); 1389 } 1390 InFlag =SDValue(); 1391 } 1392 1393 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1394 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1395 // node so that legalize doesn't hack it. 
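  // For instance, a direct call like "call void @foo()" has its callee turned
  // into a TargetGlobalAddress here (or, with -arm-long-calls, into a load of
  // foo's address from the constant pool) before the call node is created.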
1396 bool isDirect = false; 1397 bool isARMFunc = false; 1398 bool isLocalARMFunc = false; 1399 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1400 1401 if (EnableARMLongCalls) { 1402 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1403 && "long-calls with non-static relocation model!"); 1404 // Handle a global address or an external symbol. If it's not one of 1405 // those, the target's already in a register, so we don't need to do 1406 // anything extra. 1407 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1408 const GlobalValue *GV = G->getGlobal(); 1409 // Create a constant pool entry for the callee address 1410 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1411 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1412 ARMPCLabelIndex, 1413 ARMCP::CPValue, 0); 1414 // Get the address of the callee into a register 1415 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1416 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1417 Callee = DAG.getLoad(getPointerTy(), dl, 1418 DAG.getEntryNode(), CPAddr, 1419 MachinePointerInfo::getConstantPool(), 1420 false, false, 0); 1421 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1422 const char *Sym = S->getSymbol(); 1423 1424 // Create a constant pool entry for the callee address 1425 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1426 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1427 Sym, ARMPCLabelIndex, 0); 1428 // Get the address of the callee into a register 1429 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1430 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1431 Callee = DAG.getLoad(getPointerTy(), dl, 1432 DAG.getEntryNode(), CPAddr, 1433 MachinePointerInfo::getConstantPool(), 1434 false, false, 0); 1435 } 1436 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1437 const GlobalValue *GV = G->getGlobal(); 1438 isDirect = true; 1439 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1440 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1441 getTargetMachine().getRelocationModel() != Reloc::Static; 1442 isARMFunc = !Subtarget->isThumb() || isStub; 1443 // ARM call to a local ARM function is predicable. 1444 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1445 // tBX takes a register source operand. 
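    // Roughly, the callee address is first materialized with a constant-pool
    // load plus a pc-relative add (e.g. "ldr rN, .LCPI; add rN, pc") so the
    // indirect call has a register operand to use. (Illustrative sequence;
    // the exact code depends on constant-island placement.)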
1446 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1447 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1448 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1449 ARMPCLabelIndex, 1450 ARMCP::CPValue, 4); 1451 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1452 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1453 Callee = DAG.getLoad(getPointerTy(), dl, 1454 DAG.getEntryNode(), CPAddr, 1455 MachinePointerInfo::getConstantPool(), 1456 false, false, 0); 1457 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1458 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1459 getPointerTy(), Callee, PICLabel); 1460 } else { 1461 // On ELF targets for PIC code, direct calls should go through the PLT 1462 unsigned OpFlags = 0; 1463 if (Subtarget->isTargetELF() && 1464 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1465 OpFlags = ARMII::MO_PLT; 1466 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1467 } 1468 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1469 isDirect = true; 1470 bool isStub = Subtarget->isTargetDarwin() && 1471 getTargetMachine().getRelocationModel() != Reloc::Static; 1472 isARMFunc = !Subtarget->isThumb() || isStub; 1473 // tBX takes a register source operand. 1474 const char *Sym = S->getSymbol(); 1475 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1476 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1477 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1478 Sym, ARMPCLabelIndex, 4); 1479 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1480 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1481 Callee = DAG.getLoad(getPointerTy(), dl, 1482 DAG.getEntryNode(), CPAddr, 1483 MachinePointerInfo::getConstantPool(), 1484 false, false, 0); 1485 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1486 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1487 getPointerTy(), Callee, PICLabel); 1488 } else { 1489 unsigned OpFlags = 0; 1490 // On ELF targets for PIC code, direct calls should go through the PLT 1491 if (Subtarget->isTargetELF() && 1492 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1493 OpFlags = ARMII::MO_PLT; 1494 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1495 } 1496 } 1497 1498 // FIXME: handle tail calls differently. 1499 unsigned CallOpc; 1500 if (Subtarget->isThumb()) { 1501 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1502 CallOpc = ARMISD::CALL_NOLINK; 1503 else 1504 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1505 } else { 1506 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1507 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1508 : ARMISD::CALL_NOLINK; 1509 } 1510 1511 std::vector<SDValue> Ops; 1512 Ops.push_back(Chain); 1513 Ops.push_back(Callee); 1514 1515 // Add argument registers to the end of the list so that they are known live 1516 // into the call. 1517 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1518 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1519 RegsToPass[i].second.getValueType())); 1520 1521 if (InFlag.getNode()) 1522 Ops.push_back(InFlag); 1523 1524 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1525 if (isTailCall) 1526 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1527 1528 // Returns a chain and a flag for retval copy to use. 
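  // The call sequence built above is, roughly:
  //   callseq_start -> CopyToReg of argument registers -> ARMISD::CALL* ->
  //   callseq_end
  // with glue chaining the register copies into the call node.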
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");
  if ((!State->isFirstByValRegValid()) &&
      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
    State->setFirstByValReg(reg);
    // At a call site, a byval parameter that is split between
    // registers and memory needs its size truncated here. In a
    // function prologue, such byval parameters are reassembled in
    // memory, and are not truncated.
    if (State->getCallOrPrologue() == Call) {
      unsigned excess = 4 * (ARM::R4 - reg);
      assert(size >= excess && "expected larger existing stack allocation");
      size -= excess;
    }
  }
  // Confiscate any remaining parameter registers to preclude their
  // assignment to subsequent parameters.
  while (State->AllocateReg(GPRArgRegs, 4))
    ;
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const ARMInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
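/// On ARM only automatically detected sibcalls are handled: the outgoing
/// arguments must already be laid out the way the caller set them up, so no
/// GuaranteedTailCallOpt-style stack adjustment is ever generated here.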
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
  // LR. This means if we need to reload LR, it takes an extra instruction,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used. Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed.
For now, do not do this if any 1700 // argument is passed on the stack. 1701 SmallVector<CCValAssign, 16> ArgLocs; 1702 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1703 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1704 CCInfo.AnalyzeCallOperands(Outs, 1705 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1706 if (CCInfo.getNextStackOffset()) { 1707 MachineFunction &MF = DAG.getMachineFunction(); 1708 1709 // Check if the arguments are already laid out in the right way as 1710 // the caller's fixed stack objects. 1711 MachineFrameInfo *MFI = MF.getFrameInfo(); 1712 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1713 const ARMInstrInfo *TII = 1714 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1715 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1716 i != e; 1717 ++i, ++realArgIdx) { 1718 CCValAssign &VA = ArgLocs[i]; 1719 EVT RegVT = VA.getLocVT(); 1720 SDValue Arg = OutVals[realArgIdx]; 1721 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1722 if (VA.getLocInfo() == CCValAssign::Indirect) 1723 return false; 1724 if (VA.needsCustom()) { 1725 // f64 and vector types are split into multiple registers or 1726 // register/stack-slot combinations. The types will not match 1727 // the registers; give up on memory f64 refs until we figure 1728 // out what to do about this. 1729 if (!VA.isRegLoc()) 1730 return false; 1731 if (!ArgLocs[++i].isRegLoc()) 1732 return false; 1733 if (RegVT == MVT::v2f64) { 1734 if (!ArgLocs[++i].isRegLoc()) 1735 return false; 1736 if (!ArgLocs[++i].isRegLoc()) 1737 return false; 1738 } 1739 } else if (!VA.isRegLoc()) { 1740 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1741 MFI, MRI, TII)) 1742 return false; 1743 } 1744 } 1745 } 1746 } 1747 1748 return true; 1749} 1750 1751SDValue 1752ARMTargetLowering::LowerReturn(SDValue Chain, 1753 CallingConv::ID CallConv, bool isVarArg, 1754 const SmallVectorImpl<ISD::OutputArg> &Outs, 1755 const SmallVectorImpl<SDValue> &OutVals, 1756 DebugLoc dl, SelectionDAG &DAG) const { 1757 1758 // CCValAssign - represent the assignment of the return value to a location. 1759 SmallVector<CCValAssign, 16> RVLocs; 1760 1761 // CCState - Info about the registers and stack slots. 1762 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1763 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1764 1765 // Analyze outgoing return values. 1766 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1767 isVarArg)); 1768 1769 // If this is the first return lowered for this function, add 1770 // the regs to the liveout set for the function. 1771 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1772 for (unsigned i = 0; i != RVLocs.size(); ++i) 1773 if (RVLocs[i].isRegLoc()) 1774 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1775 } 1776 1777 SDValue Flag; 1778 1779 // Copy the result values into the output registers. 1780 for (unsigned i = 0, realRVLocIdx = 0; 1781 i != RVLocs.size(); 1782 ++i, ++realRVLocIdx) { 1783 CCValAssign &VA = RVLocs[i]; 1784 assert(VA.isRegLoc() && "Can only return in registers!"); 1785 1786 SDValue Arg = OutVals[realRVLocIdx]; 1787 1788 switch (VA.getLocInfo()) { 1789 default: llvm_unreachable("Unknown loc info!"); 1790 case CCValAssign::Full: break; 1791 case CCValAssign::BCvt: 1792 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1793 break; 1794 } 1795 1796 if (VA.needsCustom()) { 1797 if (VA.getLocVT() == MVT::v2f64) { 1798 // Extract the first half and return it in two registers. 
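        // (Illustrative: with the AAPCS integer assignment a v2f64 return
        //  value thus ends up split across r0-r3, each f64 half moved to a
        //  GPR pair by VMOVRRD.)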
1799 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1800 DAG.getConstant(0, MVT::i32)); 1801 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1802 DAG.getVTList(MVT::i32, MVT::i32), Half); 1803 1804 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1805 Flag = Chain.getValue(1); 1806 VA = RVLocs[++i]; // skip ahead to next loc 1807 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1808 HalfGPRs.getValue(1), Flag); 1809 Flag = Chain.getValue(1); 1810 VA = RVLocs[++i]; // skip ahead to next loc 1811 1812 // Extract the 2nd half and fall through to handle it as an f64 value. 1813 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1814 DAG.getConstant(1, MVT::i32)); 1815 } 1816 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1817 // available. 1818 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1819 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1820 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1821 Flag = Chain.getValue(1); 1822 VA = RVLocs[++i]; // skip ahead to next loc 1823 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1824 Flag); 1825 } else 1826 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1827 1828 // Guarantee that all emitted copies are 1829 // stuck together, avoiding something bad. 1830 Flag = Chain.getValue(1); 1831 } 1832 1833 SDValue result; 1834 if (Flag.getNode()) 1835 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1836 else // Return Void 1837 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1838 1839 return result; 1840} 1841 1842bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1843 if (N->getNumValues() != 1) 1844 return false; 1845 if (!N->hasNUsesOfValue(1, 0)) 1846 return false; 1847 1848 unsigned NumCopies = 0; 1849 SDNode* Copies[2]; 1850 SDNode *Use = *N->use_begin(); 1851 if (Use->getOpcode() == ISD::CopyToReg) { 1852 Copies[NumCopies++] = Use; 1853 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1854 // f64 returned in a pair of GPRs. 1855 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1856 UI != UE; ++UI) { 1857 if (UI->getOpcode() != ISD::CopyToReg) 1858 return false; 1859 Copies[UI.getUse().getResNo()] = *UI; 1860 ++NumCopies; 1861 } 1862 } else if (Use->getOpcode() == ISD::BITCAST) { 1863 // f32 returned in a single GPR. 
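    // e.g. a float return typically shows up as a BITCAST f32->i32 feeding a
    // single CopyToReg of R0, so look through the bitcast to that copy here.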
1864 if (!Use->hasNUsesOfValue(1, 0)) 1865 return false; 1866 Use = *Use->use_begin(); 1867 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1868 return false; 1869 Copies[NumCopies++] = Use; 1870 } else { 1871 return false; 1872 } 1873 1874 if (NumCopies != 1 && NumCopies != 2) 1875 return false; 1876 1877 bool HasRet = false; 1878 for (unsigned i = 0; i < NumCopies; ++i) { 1879 SDNode *Copy = Copies[i]; 1880 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1881 UI != UE; ++UI) { 1882 if (UI->getOpcode() == ISD::CopyToReg) { 1883 SDNode *Use = *UI; 1884 if (Use == Copies[0] || Use == Copies[1]) 1885 continue; 1886 return false; 1887 } 1888 if (UI->getOpcode() != ARMISD::RET_FLAG) 1889 return false; 1890 HasRet = true; 1891 } 1892 } 1893 1894 return HasRet; 1895} 1896 1897bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1898 if (!EnableARMTailCalls) 1899 return false; 1900 1901 if (!CI->isTailCall()) 1902 return false; 1903 1904 return !Subtarget->isThumb1Only(); 1905} 1906 1907// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1908// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1909// one of the above mentioned nodes. It has to be wrapped because otherwise 1910// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1911// be used to form addressing mode. These wrapped nodes will be selected 1912// into MOVi. 1913static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1914 EVT PtrVT = Op.getValueType(); 1915 // FIXME there is no actual debug info here 1916 DebugLoc dl = Op.getDebugLoc(); 1917 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1918 SDValue Res; 1919 if (CP->isMachineConstantPoolEntry()) 1920 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1921 CP->getAlignment()); 1922 else 1923 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1924 CP->getAlignment()); 1925 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1926} 1927 1928unsigned ARMTargetLowering::getJumpTableEncoding() const { 1929 return MachineJumpTableInfo::EK_Inline; 1930} 1931 1932SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1933 SelectionDAG &DAG) const { 1934 MachineFunction &MF = DAG.getMachineFunction(); 1935 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1936 unsigned ARMPCLabelIndex = 0; 1937 DebugLoc DL = Op.getDebugLoc(); 1938 EVT PtrVT = getPointerTy(); 1939 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1940 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1941 SDValue CPAddr; 1942 if (RelocM == Reloc::Static) { 1943 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1944 } else { 1945 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1946 ARMPCLabelIndex = AFI->createPICLabelUId(); 1947 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1948 ARMCP::CPBlockAddress, 1949 PCAdj); 1950 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1951 } 1952 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1953 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1954 MachinePointerInfo::getConstantPool(), 1955 false, false, 0); 1956 if (RelocM == Reloc::Static) 1957 return Result; 1958 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1959 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1960} 1961 1962// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1963SDValue 1964ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1965 SelectionDAG &DAG) const { 1966 DebugLoc dl = GA->getDebugLoc(); 1967 EVT PtrVT = getPointerTy(); 1968 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1969 MachineFunction &MF = DAG.getMachineFunction(); 1970 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1971 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1972 ARMConstantPoolValue *CPV = 1973 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1974 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1975 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1976 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1977 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1978 MachinePointerInfo::getConstantPool(), 1979 false, false, 0); 1980 SDValue Chain = Argument.getValue(1); 1981 1982 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1983 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1984 1985 // call __tls_get_addr. 1986 ArgListTy Args; 1987 ArgListEntry Entry; 1988 Entry.Node = Argument; 1989 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 1990 Args.push_back(Entry); 1991 // FIXME: is there useful debug info available here? 1992 std::pair<SDValue, SDValue> CallResult = 1993 LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()), 1994 false, false, false, false, 1995 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1996 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1997 return CallResult.first; 1998} 1999 2000// Lower ISD::GlobalTLSAddress using the "initial exec" or 2001// "local exec" model. 2002SDValue 2003ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2004 SelectionDAG &DAG) const { 2005 const GlobalValue *GV = GA->getGlobal(); 2006 DebugLoc dl = GA->getDebugLoc(); 2007 SDValue Offset; 2008 SDValue Chain = DAG.getEntryNode(); 2009 EVT PtrVT = getPointerTy(); 2010 // Get the Thread Pointer 2011 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2012 2013 if (GV->isDeclaration()) { 2014 MachineFunction &MF = DAG.getMachineFunction(); 2015 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2016 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2017 // Initial exec model. 2018 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2019 ARMConstantPoolValue *CPV = 2020 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 2021 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 2022 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2023 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2024 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2025 MachinePointerInfo::getConstantPool(), 2026 false, false, 0); 2027 Chain = Offset.getValue(1); 2028 2029 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2030 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2031 2032 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2033 MachinePointerInfo::getConstantPool(), 2034 false, false, 0); 2035 } else { 2036 // local exec model 2037 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 2038 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2039 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2040 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2041 MachinePointerInfo::getConstantPool(), 2042 false, false, 0); 2043 } 2044 2045 // The address of the thread local variable is the add of the thread 2046 // pointer with the offset of the variable. 2047 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2048} 2049 2050SDValue 2051ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2052 // TODO: implement the "local dynamic" model 2053 assert(Subtarget->isTargetELF() && 2054 "TLS not implemented for non-ELF targets"); 2055 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2056 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2057 // otherwise use the "Local Exec" TLS Model 2058 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2059 return LowerToTLSGeneralDynamicModel(GA, DAG); 2060 else 2061 return LowerToTLSExecModels(GA, DAG); 2062} 2063 2064SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2065 SelectionDAG &DAG) const { 2066 EVT PtrVT = getPointerTy(); 2067 DebugLoc dl = Op.getDebugLoc(); 2068 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2069 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2070 if (RelocM == Reloc::PIC_) { 2071 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2072 ARMConstantPoolValue *CPV = 2073 new ARMConstantPoolValue(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2074 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2075 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2076 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2077 CPAddr, 2078 MachinePointerInfo::getConstantPool(), 2079 false, false, 0); 2080 SDValue Chain = Result.getValue(1); 2081 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2082 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2083 if (!UseGOTOFF) 2084 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2085 MachinePointerInfo::getGOT(), false, false, 0); 2086 return Result; 2087 } 2088 2089 // If we have T2 ops, we can materialize the address directly via movt/movw 2090 // pair. This is always cheaper. 2091 if (Subtarget->useMovt()) { 2092 ++NumMovwMovt; 2093 // FIXME: Once remat is capable of dealing with instructions with register 2094 // operands, expand this into two nodes. 
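    // The wrapped TargetGlobalAddress is later selected to a movw/movt pair,
    // roughly:
    //   movw rN, :lower16:sym
    //   movt rN, :upper16:sym
    // (illustrative; the relocations used depend on the object format).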
2095 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2096 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2097 } else { 2098 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2099 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2100 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2101 MachinePointerInfo::getConstantPool(), 2102 false, false, 0); 2103 } 2104} 2105 2106SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2107 SelectionDAG &DAG) const { 2108 EVT PtrVT = getPointerTy(); 2109 DebugLoc dl = Op.getDebugLoc(); 2110 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2111 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2112 MachineFunction &MF = DAG.getMachineFunction(); 2113 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2114 2115 // FIXME: Enable this for static codegen when tool issues are fixed. 2116 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2117 ++NumMovwMovt; 2118 // FIXME: Once remat is capable of dealing with instructions with register 2119 // operands, expand this into two nodes. 2120 if (RelocM == Reloc::Static) 2121 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2122 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2123 2124 unsigned Wrapper = (RelocM == Reloc::PIC_) 2125 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2126 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2127 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2128 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2129 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2130 MachinePointerInfo::getGOT(), false, false, 0); 2131 return Result; 2132 } 2133 2134 unsigned ARMPCLabelIndex = 0; 2135 SDValue CPAddr; 2136 if (RelocM == Reloc::Static) { 2137 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2138 } else { 2139 ARMPCLabelIndex = AFI->createPICLabelUId(); 2140 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2141 ARMConstantPoolValue *CPV = 2142 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 2143 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2144 } 2145 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2146 2147 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2148 MachinePointerInfo::getConstantPool(), 2149 false, false, 0); 2150 SDValue Chain = Result.getValue(1); 2151 2152 if (RelocM == Reloc::PIC_) { 2153 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2154 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2155 } 2156 2157 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2158 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2159 false, false, 0); 2160 2161 return Result; 2162} 2163 2164SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2165 SelectionDAG &DAG) const { 2166 assert(Subtarget->isTargetELF() && 2167 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2168 MachineFunction &MF = DAG.getMachineFunction(); 2169 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2170 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2171 EVT PtrVT = getPointerTy(); 2172 DebugLoc dl = Op.getDebugLoc(); 2173 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2174 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2175 "_GLOBAL_OFFSET_TABLE_", 2176 ARMPCLabelIndex, PCAdj); 2177 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2178 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2179 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2180 MachinePointerInfo::getConstantPool(), 2181 false, false, 0); 2182 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2183 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2184} 2185 2186SDValue 2187ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2188 const { 2189 DebugLoc dl = Op.getDebugLoc(); 2190 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2191 Op.getOperand(0), Op.getOperand(1)); 2192} 2193 2194SDValue 2195ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2196 DebugLoc dl = Op.getDebugLoc(); 2197 SDValue Val = DAG.getConstant(0, MVT::i32); 2198 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2199 Op.getOperand(1), Val); 2200} 2201 2202SDValue 2203ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2204 DebugLoc dl = Op.getDebugLoc(); 2205 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2206 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2207} 2208 2209SDValue 2210ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2211 const ARMSubtarget *Subtarget) const { 2212 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2213 DebugLoc dl = Op.getDebugLoc(); 2214 switch (IntNo) { 2215 default: return SDValue(); // Don't custom lower most intrinsics. 2216 case Intrinsic::arm_thread_pointer: { 2217 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2218 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2219 } 2220 case Intrinsic::eh_sjlj_lsda: { 2221 MachineFunction &MF = DAG.getMachineFunction(); 2222 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2223 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2224 EVT PtrVT = getPointerTy(); 2225 DebugLoc dl = Op.getDebugLoc(); 2226 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2227 SDValue CPAddr; 2228 unsigned PCAdj = (RelocM != Reloc::PIC_) 2229 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2230 ARMConstantPoolValue *CPV = 2231 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2232 ARMCP::CPLSDA, PCAdj); 2233 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2234 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2235 SDValue Result = 2236 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2237 MachinePointerInfo::getConstantPool(), 2238 false, false, 0); 2239 2240 if (RelocM == Reloc::PIC_) { 2241 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2242 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2243 } 2244 return Result; 2245 } 2246 case Intrinsic::arm_neon_vmulls: 2247 case Intrinsic::arm_neon_vmullu: { 2248 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2249 ? 
      ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}

static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *Subtarget) {
  DebugLoc dl = Op.getDebugLoc();
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, MVT::i32));
  }

  SDValue Op5 = Op.getOperand(5);
  bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0;
  unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0);

  ARM_MB::MemBOpt DMBOpt;
  if (isDeviceBarrier)
    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY;
  else
    DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH;
  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(DMBOpt, MVT::i32));
}


static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  // FIXME: handle "fence singlethread" more efficiently.
  DebugLoc dl = Op.getDebugLoc();
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, MVT::i32));
  }

  return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(ARM_MB::ISH, MVT::i32));
}

static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre v5TE and Thumb1 do not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  DebugLoc dl = Op.getDebugLoc();
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
                     DAG.getConstant(isData, MVT::i32));
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
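  // A minimal illustration: for "va_start(ap)" this emits one store of the
  // frame index of the register-save area into the va_list object that the
  // second operand points at.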
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
                                  unsigned &VARegSize, unsigned &VARegSaveSize)
  const {
  unsigned NumGPRs;
  if (CCInfo.isFirstByValRegValid())
    NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
  else {
    unsigned int firstUnalloced;
    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
                                                sizeof(GPRArgRegs) /
                                                sizeof(GPRArgRegs[0]));
    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
  }

  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
  VARegSize = NumGPRs * 4;
  VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
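// For example (illustrative): a variadic function with one named i32 argument
// leaves r1-r3 unallocated, so 12 bytes are reserved next to the caller-pushed
// arguments and r1-r3 are stored there for va_arg to walk through.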
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        DebugLoc dl, SDValue &Chain,
                                        unsigned ArgOffset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned firstRegToSaveIndex;
  if (CCInfo.isFirstByValRegValid())
    firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0;
  else {
    firstRegToSaveIndex = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
  }

  unsigned VARegSize, VARegSaveSize;
  computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
  if (VARegSaveSize) {
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    AFI->setVarArgsRegSaveSize(VARegSaveSize);
    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize,
                                                     ArgOffset + VARegSaveSize
                                                     - VARegSize,
                                                     false));
    SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
                                    getPointerTy());

    SmallVector<SDValue, 4> MemOps;
    for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
      TargetRegisterClass *RC;
      if (AFI->isThumb1OnlyFunction())
        RC = ARM::tGPRRegisterClass;
      else
        RC = ARM::GPRRegisterClass;

      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      SDValue Store =
        DAG.getStore(Val.getValue(1), dl, Val, FIN,
                     MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
                     false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                        DAG.getConstant(4, getPointerTy()));
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  } else
    // This will point to the next argument passed via stack.
    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  int lastInsIndex = -1;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
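        // (e.g. an AAPCS f64 argument may arrive in r0+r1, r2+r3, or an
        //  r3/stack pair; GetF64FormalArgument reassembles each half with
        //  VMOVDRR, an illustrative summary of the cases handled below.)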
2496 if (VA.getLocVT() == MVT::v2f64) { 2497 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2498 Chain, DAG, dl); 2499 VA = ArgLocs[++i]; // skip ahead to next loc 2500 SDValue ArgValue2; 2501 if (VA.isMemLoc()) { 2502 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2503 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2504 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2505 MachinePointerInfo::getFixedStack(FI), 2506 false, false, 0); 2507 } else { 2508 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2509 Chain, DAG, dl); 2510 } 2511 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2512 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2513 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2514 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2515 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2516 } else 2517 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2518 2519 } else { 2520 TargetRegisterClass *RC; 2521 2522 if (RegVT == MVT::f32) 2523 RC = ARM::SPRRegisterClass; 2524 else if (RegVT == MVT::f64) 2525 RC = ARM::DPRRegisterClass; 2526 else if (RegVT == MVT::v2f64) 2527 RC = ARM::QPRRegisterClass; 2528 else if (RegVT == MVT::i32) 2529 RC = (AFI->isThumb1OnlyFunction() ? 2530 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2531 else 2532 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2533 2534 // Transform the arguments in physical registers into virtual ones. 2535 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2536 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2537 } 2538 2539 // If this is an 8 or 16-bit value, it is really passed promoted 2540 // to 32 bits. Insert an assert[sz]ext to capture this, then 2541 // truncate to the right size. 2542 switch (VA.getLocInfo()) { 2543 default: llvm_unreachable("Unknown loc info!"); 2544 case CCValAssign::Full: break; 2545 case CCValAssign::BCvt: 2546 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2547 break; 2548 case CCValAssign::SExt: 2549 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2550 DAG.getValueType(VA.getValVT())); 2551 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2552 break; 2553 case CCValAssign::ZExt: 2554 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2555 DAG.getValueType(VA.getValVT())); 2556 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2557 break; 2558 } 2559 2560 InVals.push_back(ArgValue); 2561 2562 } else { // VA.isRegLoc() 2563 2564 // sanity check 2565 assert(VA.isMemLoc()); 2566 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2567 2568 int index = ArgLocs[i].getValNo(); 2569 2570 // Some Ins[] entries become multiple ArgLoc[] entries. 2571 // Process them only once. 2572 if (index != lastInsIndex) 2573 { 2574 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2575 // FIXME: For now, all byval parameter objects are marked mutable. 2576 // This can be changed with more analysis. 2577 // In case of tail call optimization mark all arguments mutable. 2578 // Since they could be overwritten by lowering of arguments in case of 2579 // a tail call. 2580 if (Flags.isByVal()) { 2581 unsigned VARegSize, VARegSaveSize; 2582 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2583 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2584 unsigned Bytes = Flags.getByValSize() - VARegSize; 2585 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2586 int FI = MFI->CreateFixedObject(Bytes, 2587 VA.getLocMemOffset(), false); 2588 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2589 } else { 2590 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2591 VA.getLocMemOffset(), true); 2592 2593 // Create load nodes to retrieve arguments from the stack. 2594 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2595 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2596 MachinePointerInfo::getFixedStack(FI), 2597 false, false, 0)); 2598 } 2599 lastInsIndex = index; 2600 } 2601 } 2602 } 2603 2604 // varargs 2605 if (isVarArg) 2606 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2607 2608 return Chain; 2609} 2610 2611/// isFloatingPointZero - Return true if this is +0.0. 2612static bool isFloatingPointZero(SDValue Op) { 2613 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2614 return CFP->getValueAPF().isPosZero(); 2615 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2616 // Maybe this has already been legalized into the constant pool? 2617 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2618 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2619 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2620 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2621 return CFP->getValueAPF().isPosZero(); 2622 } 2623 } 2624 return false; 2625} 2626 2627/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2628/// the given operands. 2629SDValue 2630ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2631 SDValue &ARMcc, SelectionDAG &DAG, 2632 DebugLoc dl) const { 2633 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2634 unsigned C = RHSC->getZExtValue(); 2635 if (!isLegalICmpImmediate(C)) { 2636 // Constant does not fit, try adjusting it by one? 2637 switch (CC) { 2638 default: break; 2639 case ISD::SETLT: 2640 case ISD::SETGE: 2641 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2642 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2643 RHS = DAG.getConstant(C-1, MVT::i32); 2644 } 2645 break; 2646 case ISD::SETULT: 2647 case ISD::SETUGE: 2648 if (C != 0 && isLegalICmpImmediate(C-1)) { 2649 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2650 RHS = DAG.getConstant(C-1, MVT::i32); 2651 } 2652 break; 2653 case ISD::SETLE: 2654 case ISD::SETGT: 2655 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2656 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2657 RHS = DAG.getConstant(C+1, MVT::i32); 2658 } 2659 break; 2660 case ISD::SETULE: 2661 case ISD::SETUGT: 2662 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2663 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2664 RHS = DAG.getConstant(C+1, MVT::i32); 2665 } 2666 break; 2667 } 2668 } 2669 } 2670 2671 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2672 ARMISD::NodeType CompareType; 2673 switch (CondCode) { 2674 default: 2675 CompareType = ARMISD::CMP; 2676 break; 2677 case ARMCC::EQ: 2678 case ARMCC::NE: 2679 // Uses only Z Flag 2680 CompareType = ARMISD::CMPZ; 2681 break; 2682 } 2683 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2684 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2685} 2686 2687/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
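/// Comparisons against +0.0 use the single-operand CMPFPw0 form; everything
/// else uses the two-operand CMPFP, followed by FMSTAT to move the VFP flags
/// into CPSR.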
2688SDValue 2689ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2690 DebugLoc dl) const { 2691 SDValue Cmp; 2692 if (!isFloatingPointZero(RHS)) 2693 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2694 else 2695 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2696 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2697} 2698 2699/// duplicateCmp - Glue values can have only one use, so this function 2700/// duplicates a comparison node. 2701SDValue 2702ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2703 unsigned Opc = Cmp.getOpcode(); 2704 DebugLoc DL = Cmp.getDebugLoc(); 2705 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2706 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2707 2708 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2709 Cmp = Cmp.getOperand(0); 2710 Opc = Cmp.getOpcode(); 2711 if (Opc == ARMISD::CMPFP) 2712 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2713 else { 2714 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2715 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2716 } 2717 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2718} 2719 2720SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2721 SDValue Cond = Op.getOperand(0); 2722 SDValue SelectTrue = Op.getOperand(1); 2723 SDValue SelectFalse = Op.getOperand(2); 2724 DebugLoc dl = Op.getDebugLoc(); 2725 2726 // Convert: 2727 // 2728 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2729 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2730 // 2731 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2732 const ConstantSDNode *CMOVTrue = 2733 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2734 const ConstantSDNode *CMOVFalse = 2735 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2736 2737 if (CMOVTrue && CMOVFalse) { 2738 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2739 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2740 2741 SDValue True; 2742 SDValue False; 2743 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2744 True = SelectTrue; 2745 False = SelectFalse; 2746 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2747 True = SelectFalse; 2748 False = SelectTrue; 2749 } 2750 2751 if (True.getNode() && False.getNode()) { 2752 EVT VT = Op.getValueType(); 2753 SDValue ARMcc = Cond.getOperand(2); 2754 SDValue CCR = Cond.getOperand(3); 2755 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2756 assert(True.getValueType() == VT); 2757 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2758 } 2759 } 2760 } 2761 2762 return DAG.getSelectCC(dl, Cond, 2763 DAG.getConstant(0, Cond.getValueType()), 2764 SelectTrue, SelectFalse, ISD::SETNE); 2765} 2766 2767SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2768 EVT VT = Op.getValueType(); 2769 SDValue LHS = Op.getOperand(0); 2770 SDValue RHS = Op.getOperand(1); 2771 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2772 SDValue TrueVal = Op.getOperand(2); 2773 SDValue FalseVal = Op.getOperand(3); 2774 DebugLoc dl = Op.getDebugLoc(); 2775 2776 if (LHS.getValueType() == MVT::i32) { 2777 SDValue ARMcc; 2778 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2779 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2780 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2781 } 2782 2783 ARMCC::CondCodes CondCode, CondCode2; 2784 FPCCToARMCC(CC, 
CondCode, CondCode2); 2785 2786 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2787 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2788 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2789 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2790 ARMcc, CCR, Cmp); 2791 if (CondCode2 != ARMCC::AL) { 2792 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2793 // FIXME: Needs another CMP because flag can have but one use. 2794 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2795 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2796 Result, TrueVal, ARMcc2, CCR, Cmp2); 2797 } 2798 return Result; 2799} 2800 2801/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2802/// to morph to an integer compare sequence. 2803static bool canChangeToInt(SDValue Op, bool &SeenZero, 2804 const ARMSubtarget *Subtarget) { 2805 SDNode *N = Op.getNode(); 2806 if (!N->hasOneUse()) 2807 // Otherwise it requires moving the value from fp to integer registers. 2808 return false; 2809 if (!N->getNumValues()) 2810 return false; 2811 EVT VT = Op.getValueType(); 2812 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2813 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2814 // vmrs are very slow, e.g. cortex-a8. 2815 return false; 2816 2817 if (isFloatingPointZero(Op)) { 2818 SeenZero = true; 2819 return true; 2820 } 2821 return ISD::isNormalLoad(N); 2822} 2823 2824static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2825 if (isFloatingPointZero(Op)) 2826 return DAG.getConstant(0, MVT::i32); 2827 2828 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2829 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2830 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2831 Ld->isVolatile(), Ld->isNonTemporal(), 2832 Ld->getAlignment()); 2833 2834 llvm_unreachable("Unknown VFP cmp argument!"); 2835} 2836 2837static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2838 SDValue &RetVal1, SDValue &RetVal2) { 2839 if (isFloatingPointZero(Op)) { 2840 RetVal1 = DAG.getConstant(0, MVT::i32); 2841 RetVal2 = DAG.getConstant(0, MVT::i32); 2842 return; 2843 } 2844 2845 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2846 SDValue Ptr = Ld->getBasePtr(); 2847 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2848 Ld->getChain(), Ptr, 2849 Ld->getPointerInfo(), 2850 Ld->isVolatile(), Ld->isNonTemporal(), 2851 Ld->getAlignment()); 2852 2853 EVT PtrType = Ptr.getValueType(); 2854 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2855 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2856 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2857 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2858 Ld->getChain(), NewPtr, 2859 Ld->getPointerInfo().getWithOffset(4), 2860 Ld->isVolatile(), Ld->isNonTemporal(), 2861 NewAlign); 2862 return; 2863 } 2864 2865 llvm_unreachable("Unknown VFP cmp argument!"); 2866} 2867 2868/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2869/// f32 and even f64 comparisons to integer ones. 
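/// Only the equality-style conditions (SETOEQ / SETUNE) are handled: an f32
/// operand is re-loaded as an i32 and compared with a plain CMP, while an f64
/// operand is split into two i32 loads and branched on with ARMISD::BCC_i64,
/// avoiding the vcmpe + vmrs sequence entirely.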
2870SDValue 2871ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2872 SDValue Chain = Op.getOperand(0); 2873 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2874 SDValue LHS = Op.getOperand(2); 2875 SDValue RHS = Op.getOperand(3); 2876 SDValue Dest = Op.getOperand(4); 2877 DebugLoc dl = Op.getDebugLoc(); 2878 2879 bool SeenZero = false; 2880 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2881 canChangeToInt(RHS, SeenZero, Subtarget) && 2882 // If one of the operand is zero, it's safe to ignore the NaN case since 2883 // we only care about equality comparisons. 2884 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2885 // If unsafe fp math optimization is enabled and there are no other uses of 2886 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2887 // to an integer comparison. 2888 if (CC == ISD::SETOEQ) 2889 CC = ISD::SETEQ; 2890 else if (CC == ISD::SETUNE) 2891 CC = ISD::SETNE; 2892 2893 SDValue ARMcc; 2894 if (LHS.getValueType() == MVT::f32) { 2895 LHS = bitcastf32Toi32(LHS, DAG); 2896 RHS = bitcastf32Toi32(RHS, DAG); 2897 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2898 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2899 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2900 Chain, Dest, ARMcc, CCR, Cmp); 2901 } 2902 2903 SDValue LHS1, LHS2; 2904 SDValue RHS1, RHS2; 2905 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2906 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2907 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2908 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2909 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2910 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2911 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2912 } 2913 2914 return SDValue(); 2915} 2916 2917SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2918 SDValue Chain = Op.getOperand(0); 2919 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2920 SDValue LHS = Op.getOperand(2); 2921 SDValue RHS = Op.getOperand(3); 2922 SDValue Dest = Op.getOperand(4); 2923 DebugLoc dl = Op.getDebugLoc(); 2924 2925 if (LHS.getValueType() == MVT::i32) { 2926 SDValue ARMcc; 2927 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2928 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2929 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2930 Chain, Dest, ARMcc, CCR, Cmp); 2931 } 2932 2933 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2934 2935 if (UnsafeFPMath && 2936 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2937 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2938 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2939 if (Result.getNode()) 2940 return Result; 2941 } 2942 2943 ARMCC::CondCodes CondCode, CondCode2; 2944 FPCCToARMCC(CC, CondCode, CondCode2); 2945 2946 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2947 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2948 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2949 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2950 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2951 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2952 if (CondCode2 != ARMCC::AL) { 2953 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2954 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2955 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2956 } 2957 return Res; 2958} 2959 2960SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2961 SDValue 
Chain = Op.getOperand(0); 2962 SDValue Table = Op.getOperand(1); 2963 SDValue Index = Op.getOperand(2); 2964 DebugLoc dl = Op.getDebugLoc(); 2965 2966 EVT PTy = getPointerTy(); 2967 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2968 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2969 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2970 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2971 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2972 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2973 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2974 if (Subtarget->isThumb2()) { 2975 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2976 // which does another jump to the destination. This also makes it easier 2977 // to translate it to TBB / TBH later. 2978 // FIXME: This might not work if the function is extremely large. 2979 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2980 Addr, Op.getOperand(2), JTI, UId); 2981 } 2982 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2983 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2984 MachinePointerInfo::getJumpTable(), 2985 false, false, 0); 2986 Chain = Addr.getValue(1); 2987 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2988 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2989 } else { 2990 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2991 MachinePointerInfo::getJumpTable(), false, false, 0); 2992 Chain = Addr.getValue(1); 2993 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2994 } 2995} 2996 2997static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2998 DebugLoc dl = Op.getDebugLoc(); 2999 unsigned Opc; 3000 3001 switch (Op.getOpcode()) { 3002 default: 3003 assert(0 && "Invalid opcode!"); 3004 case ISD::FP_TO_SINT: 3005 Opc = ARMISD::FTOSI; 3006 break; 3007 case ISD::FP_TO_UINT: 3008 Opc = ARMISD::FTOUI; 3009 break; 3010 } 3011 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3012 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3013} 3014 3015static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3016 EVT VT = Op.getValueType(); 3017 DebugLoc dl = Op.getDebugLoc(); 3018 3019 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3020 "Invalid type for custom lowering!"); 3021 if (VT != MVT::v4f32) 3022 return DAG.UnrollVectorOp(Op.getNode()); 3023 3024 unsigned CastOpc; 3025 unsigned Opc; 3026 switch (Op.getOpcode()) { 3027 default: 3028 assert(0 && "Invalid opcode!"); 3029 case ISD::SINT_TO_FP: 3030 CastOpc = ISD::SIGN_EXTEND; 3031 Opc = ISD::SINT_TO_FP; 3032 break; 3033 case ISD::UINT_TO_FP: 3034 CastOpc = ISD::ZERO_EXTEND; 3035 Opc = ISD::UINT_TO_FP; 3036 break; 3037 } 3038 3039 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3040 return DAG.getNode(Opc, dl, VT, Op); 3041} 3042 3043static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3044 EVT VT = Op.getValueType(); 3045 if (VT.isVector()) 3046 return LowerVectorINT_TO_FP(Op, DAG); 3047 3048 DebugLoc dl = Op.getDebugLoc(); 3049 unsigned Opc; 3050 3051 switch (Op.getOpcode()) { 3052 default: 3053 assert(0 && "Invalid opcode!"); 3054 case ISD::SINT_TO_FP: 3055 Opc = ARMISD::SITOF; 3056 break; 3057 case ISD::UINT_TO_FP: 3058 Opc = ARMISD::UITOF; 3059 break; 3060 } 3061 3062 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3063 return DAG.getNode(Opc, dl, VT, Op); 3064} 3065 3066SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3067 // Implement fcopysign with a fabs and a conditional fneg. 3068 SDValue Tmp0 = Op.getOperand(0); 3069 SDValue Tmp1 = Op.getOperand(1); 3070 DebugLoc dl = Op.getDebugLoc(); 3071 EVT VT = Op.getValueType(); 3072 EVT SrcVT = Tmp1.getValueType(); 3073 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3074 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3075 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3076 3077 if (UseNEON) { 3078 // Use VBSL to copy the sign bit. 3079 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3080 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3081 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3082 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3083 if (VT == MVT::f64) 3084 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3085 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3086 DAG.getConstant(32, MVT::i32)); 3087 else /*if (VT == MVT::f32)*/ 3088 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3089 if (SrcVT == MVT::f32) { 3090 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3091 if (VT == MVT::f64) 3092 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3093 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3094 DAG.getConstant(32, MVT::i32)); 3095 } else if (VT == MVT::f32) 3096 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3097 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3098 DAG.getConstant(32, MVT::i32)); 3099 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3100 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3101 3102 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3103 MVT::i32); 3104 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3105 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3106 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3107 3108 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3109 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3110 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3111 if (VT == MVT::f32) { 3112 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3113 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3114 DAG.getConstant(0, MVT::i32)); 3115 } else { 3116 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3117 } 3118 3119 return Res; 3120 } 3121 3122 // Bitcast operand 1 to i32. 3123 if (SrcVT == MVT::f64) 3124 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3125 &Tmp1, 1).getValue(1); 3126 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3127 3128 // Or in the signbit with integer operations. 3129 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3130 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3131 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3132 if (VT == MVT::f32) { 3133 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3134 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3135 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3136 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3137 } 3138 3139 // f64: Or the high part with signbit and then combine two parts. 
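  // VMOVRRD splits the f64 into its two 32-bit halves; only the high half
  // carries the IEEE-754 sign bit, so it alone is masked with 0x7fffffff and
  // OR'ed with the sign extracted from Tmp1 before VMOVDRR rebuilds the f64.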
3140 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3141 &Tmp0, 1); 3142 SDValue Lo = Tmp0.getValue(0); 3143 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3144 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3145 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3146} 3147 3148SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3149 MachineFunction &MF = DAG.getMachineFunction(); 3150 MachineFrameInfo *MFI = MF.getFrameInfo(); 3151 MFI->setReturnAddressIsTaken(true); 3152 3153 EVT VT = Op.getValueType(); 3154 DebugLoc dl = Op.getDebugLoc(); 3155 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3156 if (Depth) { 3157 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3158 SDValue Offset = DAG.getConstant(4, MVT::i32); 3159 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3160 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3161 MachinePointerInfo(), false, false, 0); 3162 } 3163 3164 // Return LR, which contains the return address. Mark it an implicit live-in. 3165 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3166 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3167} 3168 3169SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3170 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3171 MFI->setFrameAddressIsTaken(true); 3172 3173 EVT VT = Op.getValueType(); 3174 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3175 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3176 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3177 ? ARM::R7 : ARM::R11; 3178 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3179 while (Depth--) 3180 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3181 MachinePointerInfo(), 3182 false, false, 0); 3183 return FrameAddr; 3184} 3185 3186/// ExpandBITCAST - If the target supports VFP, this function is called to 3187/// expand a bit convert where either the source or destination type is i64 to 3188/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3189/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3190/// vectors), since the legalizer won't know what to do with that. 3191static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3192 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3193 DebugLoc dl = N->getDebugLoc(); 3194 SDValue Op = N->getOperand(0); 3195 3196 // This function is only supposed to be called for i64 types, either as the 3197 // source or destination of the bit convert. 3198 EVT SrcVT = Op.getValueType(); 3199 EVT DstVT = N->getValueType(0); 3200 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3201 "ExpandBITCAST called for non-i64 type"); 3202 3203 // Turn i64->f64 into VMOVDRR. 3204 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3205 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3206 DAG.getConstant(0, MVT::i32)); 3207 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3208 DAG.getConstant(1, MVT::i32)); 3209 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3210 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3211 } 3212 3213 // Turn f64->i64 into VMOVRRD. 3214 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3215 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3216 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3217 // Merge the pieces into a single i64 value. 
3218 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3219 } 3220 3221 return SDValue(); 3222} 3223 3224/// getZeroVector - Returns a vector of specified type with all zero elements. 3225/// Zero vectors are used to represent vector negation and in those cases 3226/// will be implemented with the NEON VNEG instruction. However, VNEG does 3227/// not support i64 elements, so sometimes the zero vectors will need to be 3228/// explicitly constructed. Regardless, use a canonical VMOV to create the 3229/// zero vector. 3230static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3231 assert(VT.isVector() && "Expected a vector type"); 3232 // The canonical modified immediate encoding of a zero vector is....0! 3233 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3234 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3235 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3236 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3237} 3238 3239/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two 3240/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3241SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3242 SelectionDAG &DAG) const { 3243 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3244 EVT VT = Op.getValueType(); 3245 unsigned VTBits = VT.getSizeInBits(); 3246 DebugLoc dl = Op.getDebugLoc(); 3247 SDValue ShOpLo = Op.getOperand(0); 3248 SDValue ShOpHi = Op.getOperand(1); 3249 SDValue ShAmt = Op.getOperand(2); 3250 SDValue ARMcc; 3251 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3252 3253 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3254 3255 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3256 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3257 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3258 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3259 DAG.getConstant(VTBits, MVT::i32)); 3260 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3261 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3262 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3263 3264 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3265 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3266 ARMcc, DAG, dl); 3267 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3268 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3269 CCR, Cmp); 3270 3271 SDValue Ops[2] = { Lo, Hi }; 3272 return DAG.getMergeValues(Ops, 2, dl); 3273} 3274 3275/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3276/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
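/// The result is built from two 32-bit shifts plus a conditional move: for
/// amounts below 32 the high word is (hi << amt) | (lo >> (32 - amt)), and a
/// CMOV predicated on (amt - 32) >= 0 substitutes lo << (amt - 32) otherwise;
/// the low word is simply lo << amt, relying on ARM's shift-by-register
/// yielding zero once the amount reaches 32.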
3277SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3278 SelectionDAG &DAG) const { 3279 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3280 EVT VT = Op.getValueType(); 3281 unsigned VTBits = VT.getSizeInBits(); 3282 DebugLoc dl = Op.getDebugLoc(); 3283 SDValue ShOpLo = Op.getOperand(0); 3284 SDValue ShOpHi = Op.getOperand(1); 3285 SDValue ShAmt = Op.getOperand(2); 3286 SDValue ARMcc; 3287 3288 assert(Op.getOpcode() == ISD::SHL_PARTS); 3289 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3290 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3291 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3292 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3293 DAG.getConstant(VTBits, MVT::i32)); 3294 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3295 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3296 3297 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3298 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3299 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3300 ARMcc, DAG, dl); 3301 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3302 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3303 CCR, Cmp); 3304 3305 SDValue Ops[2] = { Lo, Hi }; 3306 return DAG.getMergeValues(Ops, 2, dl); 3307} 3308 3309SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3310 SelectionDAG &DAG) const { 3311 // The rounding mode is in bits 23:22 of the FPSCR. 3312 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0. 3313 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3, 3314 // so that the shift and the AND get folded into a bitfield extract. 3315 DebugLoc dl = Op.getDebugLoc(); 3316 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3317 DAG.getConstant(Intrinsic::arm_get_fpscr, 3318 MVT::i32)); 3319 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3320 DAG.getConstant(1U << 22, MVT::i32)); 3321 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3322 DAG.getConstant(22, MVT::i32)); 3323 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3324 DAG.getConstant(3, MVT::i32)); 3325} 3326 3327static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3328 const ARMSubtarget *ST) { 3329 EVT VT = N->getValueType(0); 3330 DebugLoc dl = N->getDebugLoc(); 3331 3332 if (!ST->hasV6T2Ops()) 3333 return SDValue(); 3334 3335 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3336 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3337} 3338 3339static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3340 const ARMSubtarget *ST) { 3341 EVT VT = N->getValueType(0); 3342 DebugLoc dl = N->getDebugLoc(); 3343 3344 if (!VT.isVector()) 3345 return SDValue(); 3346 3347 // Lower vector shifts on NEON to use VSHL. 3348 assert(ST->hasNEON() && "unexpected vector shift"); 3349 3350 // Left shifts translate directly to the vshiftu intrinsic. 3351 if (N->getOpcode() == ISD::SHL) 3352 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3353 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3354 N->getOperand(0), N->getOperand(1)); 3355 3356 assert((N->getOpcode() == ISD::SRA || 3357 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3358 3359 // NEON uses the same intrinsics for both left and right shifts. For 3360 // right shifts, the shift amounts are negative, so negate the vector of 3361 // shift amounts.
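  // For example, (srl <4 x i32> %x, splat 7) becomes vshiftu(%x, splat -7);
  // a NEON VSHL with a negative per-lane count performs the corresponding
  // right shift.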
3362 EVT ShiftVT = N->getOperand(1).getValueType(); 3363 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3364 getZeroVector(ShiftVT, DAG, dl), 3365 N->getOperand(1)); 3366 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3367 Intrinsic::arm_neon_vshifts : 3368 Intrinsic::arm_neon_vshiftu); 3369 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3370 DAG.getConstant(vshiftInt, MVT::i32), 3371 N->getOperand(0), NegatedCount); 3372} 3373 3374static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3375 const ARMSubtarget *ST) { 3376 EVT VT = N->getValueType(0); 3377 DebugLoc dl = N->getDebugLoc(); 3378 3379 // We can get here for a node like i32 = ISD::SHL i32, i64 3380 if (VT != MVT::i64) 3381 return SDValue(); 3382 3383 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3384 "Unknown shift to lower!"); 3385 3386 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3387 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3388 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3389 return SDValue(); 3390 3391 // If we are in thumb mode, we don't have RRX. 3392 if (ST->isThumb1Only()) return SDValue(); 3393 3394 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3395 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3396 DAG.getConstant(0, MVT::i32)); 3397 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3398 DAG.getConstant(1, MVT::i32)); 3399 3400 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3401 // captures the result into a carry flag. 3402 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3403 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3404 3405 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3406 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3407 3408 // Merge the pieces into a single i64 value. 3409 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3410} 3411 3412static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3413 SDValue TmpOp0, TmpOp1; 3414 bool Invert = false; 3415 bool Swap = false; 3416 unsigned Opc = 0; 3417 3418 SDValue Op0 = Op.getOperand(0); 3419 SDValue Op1 = Op.getOperand(1); 3420 SDValue CC = Op.getOperand(2); 3421 EVT VT = Op.getValueType(); 3422 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3423 DebugLoc dl = Op.getDebugLoc(); 3424 3425 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3426 switch (SetCCOpcode) { 3427 default: llvm_unreachable("Illegal FP comparison"); break; 3428 case ISD::SETUNE: 3429 case ISD::SETNE: Invert = true; // Fallthrough 3430 case ISD::SETOEQ: 3431 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3432 case ISD::SETOLT: 3433 case ISD::SETLT: Swap = true; // Fallthrough 3434 case ISD::SETOGT: 3435 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3436 case ISD::SETOLE: 3437 case ISD::SETLE: Swap = true; // Fallthrough 3438 case ISD::SETOGE: 3439 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3440 case ISD::SETUGE: Swap = true; // Fallthrough 3441 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3442 case ISD::SETUGT: Swap = true; // Fallthrough 3443 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3444 case ISD::SETUEQ: Invert = true; // Fallthrough 3445 case ISD::SETONE: 3446 // Expand this to (OLT | OGT). 
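      // A lane is therefore set only when the inputs compare ordered and
      // unequal; NaN lanes fail both VCGTs and come out false, as required.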
3447 TmpOp0 = Op0; 3448 TmpOp1 = Op1; 3449 Opc = ISD::OR; 3450 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3451 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3452 break; 3453 case ISD::SETUO: Invert = true; // Fallthrough 3454 case ISD::SETO: 3455 // Expand this to (OLT | OGE). 3456 TmpOp0 = Op0; 3457 TmpOp1 = Op1; 3458 Opc = ISD::OR; 3459 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3460 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3461 break; 3462 } 3463 } else { 3464 // Integer comparisons. 3465 switch (SetCCOpcode) { 3466 default: llvm_unreachable("Illegal integer comparison"); break; 3467 case ISD::SETNE: Invert = true; 3468 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3469 case ISD::SETLT: Swap = true; 3470 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3471 case ISD::SETLE: Swap = true; 3472 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3473 case ISD::SETULT: Swap = true; 3474 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3475 case ISD::SETULE: Swap = true; 3476 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3477 } 3478 3479 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3480 if (Opc == ARMISD::VCEQ) { 3481 3482 SDValue AndOp; 3483 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3484 AndOp = Op0; 3485 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3486 AndOp = Op1; 3487 3488 // Ignore bitconvert. 3489 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3490 AndOp = AndOp.getOperand(0); 3491 3492 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3493 Opc = ARMISD::VTST; 3494 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3495 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3496 Invert = !Invert; 3497 } 3498 } 3499 } 3500 3501 if (Swap) 3502 std::swap(Op0, Op1); 3503 3504 // If one of the operands is a constant vector zero, attempt to fold the 3505 // comparison to a specialized compare-against-zero form. 3506 SDValue SingleOp; 3507 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3508 SingleOp = Op0; 3509 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3510 if (Opc == ARMISD::VCGE) 3511 Opc = ARMISD::VCLEZ; 3512 else if (Opc == ARMISD::VCGT) 3513 Opc = ARMISD::VCLTZ; 3514 SingleOp = Op1; 3515 } 3516 3517 SDValue Result; 3518 if (SingleOp.getNode()) { 3519 switch (Opc) { 3520 case ARMISD::VCEQ: 3521 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3522 case ARMISD::VCGE: 3523 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3524 case ARMISD::VCLEZ: 3525 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3526 case ARMISD::VCGT: 3527 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3528 case ARMISD::VCLTZ: 3529 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3530 default: 3531 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3532 } 3533 } else { 3534 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3535 } 3536 3537 if (Invert) 3538 Result = DAG.getNOT(dl, Result, VT); 3539 3540 return Result; 3541} 3542 3543/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3544/// valid vector constant for a NEON instruction with a "modified immediate" 3545/// operand (e.g., VMOV). If so, return the encoded value. 
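/// The encoding packs the instruction's Op and Cmode fields together with the
/// 8-bit immediate via ARM_AM::createNEONModImm; for instance, a v4i32 splat
/// of 0x0000nn00 is represented with Cmode=001x and Imm=0xnn.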
3546static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3547 unsigned SplatBitSize, SelectionDAG &DAG, 3548 EVT &VT, bool is128Bits, NEONModImmType type) { 3549 unsigned OpCmode, Imm; 3550 3551 // SplatBitSize is set to the smallest size that splats the vector, so a 3552 // zero vector will always have SplatBitSize == 8. However, NEON modified 3553 // immediate instructions others than VMOV do not support the 8-bit encoding 3554 // of a zero vector, and the default encoding of zero is supposed to be the 3555 // 32-bit version. 3556 if (SplatBits == 0) 3557 SplatBitSize = 32; 3558 3559 switch (SplatBitSize) { 3560 case 8: 3561 if (type != VMOVModImm) 3562 return SDValue(); 3563 // Any 1-byte value is OK. Op=0, Cmode=1110. 3564 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3565 OpCmode = 0xe; 3566 Imm = SplatBits; 3567 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3568 break; 3569 3570 case 16: 3571 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3572 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3573 if ((SplatBits & ~0xff) == 0) { 3574 // Value = 0x00nn: Op=x, Cmode=100x. 3575 OpCmode = 0x8; 3576 Imm = SplatBits; 3577 break; 3578 } 3579 if ((SplatBits & ~0xff00) == 0) { 3580 // Value = 0xnn00: Op=x, Cmode=101x. 3581 OpCmode = 0xa; 3582 Imm = SplatBits >> 8; 3583 break; 3584 } 3585 return SDValue(); 3586 3587 case 32: 3588 // NEON's 32-bit VMOV supports splat values where: 3589 // * only one byte is nonzero, or 3590 // * the least significant byte is 0xff and the second byte is nonzero, or 3591 // * the least significant 2 bytes are 0xff and the third is nonzero. 3592 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3593 if ((SplatBits & ~0xff) == 0) { 3594 // Value = 0x000000nn: Op=x, Cmode=000x. 3595 OpCmode = 0; 3596 Imm = SplatBits; 3597 break; 3598 } 3599 if ((SplatBits & ~0xff00) == 0) { 3600 // Value = 0x0000nn00: Op=x, Cmode=001x. 3601 OpCmode = 0x2; 3602 Imm = SplatBits >> 8; 3603 break; 3604 } 3605 if ((SplatBits & ~0xff0000) == 0) { 3606 // Value = 0x00nn0000: Op=x, Cmode=010x. 3607 OpCmode = 0x4; 3608 Imm = SplatBits >> 16; 3609 break; 3610 } 3611 if ((SplatBits & ~0xff000000) == 0) { 3612 // Value = 0xnn000000: Op=x, Cmode=011x. 3613 OpCmode = 0x6; 3614 Imm = SplatBits >> 24; 3615 break; 3616 } 3617 3618 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3619 if (type == OtherModImm) return SDValue(); 3620 3621 if ((SplatBits & ~0xffff) == 0 && 3622 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3623 // Value = 0x0000nnff: Op=x, Cmode=1100. 3624 OpCmode = 0xc; 3625 Imm = SplatBits >> 8; 3626 SplatBits |= 0xff; 3627 break; 3628 } 3629 3630 if ((SplatBits & ~0xffffff) == 0 && 3631 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3632 // Value = 0x00nnffff: Op=x, Cmode=1101. 3633 OpCmode = 0xd; 3634 Imm = SplatBits >> 16; 3635 SplatBits |= 0xffff; 3636 break; 3637 } 3638 3639 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3640 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3641 // VMOV.I32. A (very) minor optimization would be to replicate the value 3642 // and fall through here to test for a valid 64-bit splat. But, then the 3643 // caller would also need to check and handle the change in size. 3644 return SDValue(); 3645 3646 case 64: { 3647 if (type != VMOVModImm) 3648 return SDValue(); 3649 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
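    // Each bit of the 8-bit immediate selects one byte of the 64-bit value
    // (bit i set means byte i is 0xff), so e.g. the splat 0x00ff00ff00ff00ff
    // is encoded with Imm = 0x55.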
3650 uint64_t BitMask = 0xff; 3651 uint64_t Val = 0; 3652 unsigned ImmMask = 1; 3653 Imm = 0; 3654 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3655 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3656 Val |= BitMask; 3657 Imm |= ImmMask; 3658 } else if ((SplatBits & BitMask) != 0) { 3659 return SDValue(); 3660 } 3661 BitMask <<= 8; 3662 ImmMask <<= 1; 3663 } 3664 // Op=1, Cmode=1110. 3665 OpCmode = 0x1e; 3666 SplatBits = Val; 3667 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3668 break; 3669 } 3670 3671 default: 3672 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3673 return SDValue(); 3674 } 3675 3676 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3677 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3678} 3679 3680static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3681 bool &ReverseVEXT, unsigned &Imm) { 3682 unsigned NumElts = VT.getVectorNumElements(); 3683 ReverseVEXT = false; 3684 3685 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3686 if (M[0] < 0) 3687 return false; 3688 3689 Imm = M[0]; 3690 3691 // If this is a VEXT shuffle, the immediate value is the index of the first 3692 // element. The other shuffle indices must be the successive elements after 3693 // the first one. 3694 unsigned ExpectedElt = Imm; 3695 for (unsigned i = 1; i < NumElts; ++i) { 3696 // Increment the expected index. If it wraps around, it may still be 3697 // a VEXT but the source vectors must be swapped. 3698 ExpectedElt += 1; 3699 if (ExpectedElt == NumElts * 2) { 3700 ExpectedElt = 0; 3701 ReverseVEXT = true; 3702 } 3703 3704 if (M[i] < 0) continue; // ignore UNDEF indices 3705 if (ExpectedElt != static_cast<unsigned>(M[i])) 3706 return false; 3707 } 3708 3709 // Adjust the index value if the source operands will be swapped. 3710 if (ReverseVEXT) 3711 Imm -= NumElts; 3712 3713 return true; 3714} 3715 3716/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3717/// instruction with the specified blocksize. (The order of the elements 3718/// within each block of the vector is reversed.) 3719static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3720 unsigned BlockSize) { 3721 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3722 "Only possible block sizes for VREV are: 16, 32, 64"); 3723 3724 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3725 if (EltSz == 64) 3726 return false; 3727 3728 unsigned NumElts = VT.getVectorNumElements(); 3729 unsigned BlockElts = M[0] + 1; 3730 // If the first shuffle index is UNDEF, be optimistic. 3731 if (M[0] < 0) 3732 BlockElts = BlockSize / EltSz; 3733 3734 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3735 return false; 3736 3737 for (unsigned i = 0; i < NumElts; ++i) { 3738 if (M[i] < 0) continue; // ignore UNDEF indices 3739 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3740 return false; 3741 } 3742 3743 return true; 3744} 3745 3746static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3747 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3748 // range, then 0 is placed into the resulting vector. So pretty much any mask 3749 // of 8 elements can work here. 
3750 return VT == MVT::v8i8 && M.size() == 8; 3751} 3752 3753static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3754 unsigned &WhichResult) { 3755 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3756 if (EltSz == 64) 3757 return false; 3758 3759 unsigned NumElts = VT.getVectorNumElements(); 3760 WhichResult = (M[0] == 0 ? 0 : 1); 3761 for (unsigned i = 0; i < NumElts; i += 2) { 3762 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3763 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3764 return false; 3765 } 3766 return true; 3767} 3768 3769/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3770/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3771/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3772static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3773 unsigned &WhichResult) { 3774 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3775 if (EltSz == 64) 3776 return false; 3777 3778 unsigned NumElts = VT.getVectorNumElements(); 3779 WhichResult = (M[0] == 0 ? 0 : 1); 3780 for (unsigned i = 0; i < NumElts; i += 2) { 3781 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3782 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3783 return false; 3784 } 3785 return true; 3786} 3787 3788static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3789 unsigned &WhichResult) { 3790 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3791 if (EltSz == 64) 3792 return false; 3793 3794 unsigned NumElts = VT.getVectorNumElements(); 3795 WhichResult = (M[0] == 0 ? 0 : 1); 3796 for (unsigned i = 0; i != NumElts; ++i) { 3797 if (M[i] < 0) continue; // ignore UNDEF indices 3798 if ((unsigned) M[i] != 2 * i + WhichResult) 3799 return false; 3800 } 3801 3802 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3803 if (VT.is64BitVector() && EltSz == 32) 3804 return false; 3805 3806 return true; 3807} 3808 3809/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3810/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3811/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3812static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3813 unsigned &WhichResult) { 3814 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3815 if (EltSz == 64) 3816 return false; 3817 3818 unsigned Half = VT.getVectorNumElements() / 2; 3819 WhichResult = (M[0] == 0 ? 0 : 1); 3820 for (unsigned j = 0; j != 2; ++j) { 3821 unsigned Idx = WhichResult; 3822 for (unsigned i = 0; i != Half; ++i) { 3823 int MIdx = M[i + j * Half]; 3824 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3825 return false; 3826 Idx += 2; 3827 } 3828 } 3829 3830 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3831 if (VT.is64BitVector() && EltSz == 32) 3832 return false; 3833 3834 return true; 3835} 3836 3837static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3838 unsigned &WhichResult) { 3839 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3840 if (EltSz == 64) 3841 return false; 3842 3843 unsigned NumElts = VT.getVectorNumElements(); 3844 WhichResult = (M[0] == 0 ? 0 : 1); 3845 unsigned Idx = WhichResult * NumElts / 2; 3846 for (unsigned i = 0; i != NumElts; i += 2) { 3847 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3848 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3849 return false; 3850 Idx += 1; 3851 } 3852 3853 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
3854 if (VT.is64BitVector() && EltSz == 32) 3855 return false; 3856 3857 return true; 3858} 3859 3860/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3861/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3862/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3863static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3864 unsigned &WhichResult) { 3865 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3866 if (EltSz == 64) 3867 return false; 3868 3869 unsigned NumElts = VT.getVectorNumElements(); 3870 WhichResult = (M[0] == 0 ? 0 : 1); 3871 unsigned Idx = WhichResult * NumElts / 2; 3872 for (unsigned i = 0; i != NumElts; i += 2) { 3873 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3874 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3875 return false; 3876 Idx += 1; 3877 } 3878 3879 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3880 if (VT.is64BitVector() && EltSz == 32) 3881 return false; 3882 3883 return true; 3884} 3885 3886// If N is an integer constant that can be moved into a register in one 3887// instruction, return an SDValue of such a constant (will become a MOV 3888// instruction). Otherwise return null. 3889static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3890 const ARMSubtarget *ST, DebugLoc dl) { 3891 uint64_t Val; 3892 if (!isa<ConstantSDNode>(N)) 3893 return SDValue(); 3894 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3895 3896 if (ST->isThumb1Only()) { 3897 if (Val <= 255 || ~Val <= 255) 3898 return DAG.getConstant(Val, MVT::i32); 3899 } else { 3900 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3901 return DAG.getConstant(Val, MVT::i32); 3902 } 3903 return SDValue(); 3904} 3905 3906// If this is a case we can't handle, return null and let the default 3907// expansion code take care of it. 3908SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3909 const ARMSubtarget *ST) const { 3910 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3911 DebugLoc dl = Op.getDebugLoc(); 3912 EVT VT = Op.getValueType(); 3913 3914 APInt SplatBits, SplatUndef; 3915 unsigned SplatBitSize; 3916 bool HasAnyUndefs; 3917 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3918 if (SplatBitSize <= 64) { 3919 // Check if an immediate VMOV works. 3920 EVT VmovVT; 3921 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3922 SplatUndef.getZExtValue(), SplatBitSize, 3923 DAG, VmovVT, VT.is128BitVector(), 3924 VMOVModImm); 3925 if (Val.getNode()) { 3926 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3927 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3928 } 3929 3930 // Try an immediate VMVN. 3931 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3932 ((1LL << SplatBitSize) - 1)); 3933 Val = isNEONModifiedImm(NegatedImm, 3934 SplatUndef.getZExtValue(), SplatBitSize, 3935 DAG, VmovVT, VT.is128BitVector(), 3936 VMVNModImm); 3937 if (Val.getNode()) { 3938 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3939 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3940 } 3941 } 3942 } 3943 3944 // Scan through the operands to see if only one value is used. 
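  // The same pass also notes whether only lane 0 is defined and whether every
  // element is a constant; those flags choose between SCALAR_TO_VECTOR, VDUP,
  // and the constant-pool fallback below.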
3945 unsigned NumElts = VT.getVectorNumElements(); 3946 bool isOnlyLowElement = true; 3947 bool usesOnlyOneValue = true; 3948 bool isConstant = true; 3949 SDValue Value; 3950 for (unsigned i = 0; i < NumElts; ++i) { 3951 SDValue V = Op.getOperand(i); 3952 if (V.getOpcode() == ISD::UNDEF) 3953 continue; 3954 if (i > 0) 3955 isOnlyLowElement = false; 3956 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3957 isConstant = false; 3958 3959 if (!Value.getNode()) 3960 Value = V; 3961 else if (V != Value) 3962 usesOnlyOneValue = false; 3963 } 3964 3965 if (!Value.getNode()) 3966 return DAG.getUNDEF(VT); 3967 3968 if (isOnlyLowElement) 3969 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3970 3971 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3972 3973 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3974 // i32 and try again. 3975 if (usesOnlyOneValue && EltSize <= 32) { 3976 if (!isConstant) 3977 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3978 if (VT.getVectorElementType().isFloatingPoint()) { 3979 SmallVector<SDValue, 8> Ops; 3980 for (unsigned i = 0; i < NumElts; ++i) 3981 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3982 Op.getOperand(i))); 3983 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3984 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3985 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3986 if (Val.getNode()) 3987 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3988 } 3989 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3990 if (Val.getNode()) 3991 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3992 } 3993 3994 // If all elements are constants and the case above didn't get hit, fall back 3995 // to the default expansion, which will generate a load from the constant 3996 // pool. 3997 if (isConstant) 3998 return SDValue(); 3999 4000 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4001 if (NumElts >= 4) { 4002 SDValue shuffle = ReconstructShuffle(Op, DAG); 4003 if (shuffle != SDValue()) 4004 return shuffle; 4005 } 4006 4007 // Vectors with 32- or 64-bit elements can be built by directly assigning 4008 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4009 // will be legalized. 4010 if (EltSize >= 32) { 4011 // Do the expansion with floating-point types, since that is what the VFP 4012 // registers are defined to use, and since i64 is not legal. 4013 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4014 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4015 SmallVector<SDValue, 8> Ops; 4016 for (unsigned i = 0; i < NumElts; ++i) 4017 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4018 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4019 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4020 } 4021 4022 return SDValue(); 4023} 4024 4025// Gather data to see if the operation can be modelled as a 4026// shuffle in combination with VEXTs. 
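// Every operand must be an EXTRACT_VECTOR_ELT drawn from at most two source
// vectors; each wide source is narrowed to the result width (by taking one
// half or emitting a VEXT), and the element indices are then rewritten into a
// single shuffle mask over the two narrowed sources.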
4027SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4028 SelectionDAG &DAG) const { 4029 DebugLoc dl = Op.getDebugLoc(); 4030 EVT VT = Op.getValueType(); 4031 unsigned NumElts = VT.getVectorNumElements(); 4032 4033 SmallVector<SDValue, 2> SourceVecs; 4034 SmallVector<unsigned, 2> MinElts; 4035 SmallVector<unsigned, 2> MaxElts; 4036 4037 for (unsigned i = 0; i < NumElts; ++i) { 4038 SDValue V = Op.getOperand(i); 4039 if (V.getOpcode() == ISD::UNDEF) 4040 continue; 4041 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4042 // A shuffle can only come from building a vector from various 4043 // elements of other vectors. 4044 return SDValue(); 4045 } 4046 4047 // Record this extraction against the appropriate vector if possible... 4048 SDValue SourceVec = V.getOperand(0); 4049 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4050 bool FoundSource = false; 4051 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4052 if (SourceVecs[j] == SourceVec) { 4053 if (MinElts[j] > EltNo) 4054 MinElts[j] = EltNo; 4055 if (MaxElts[j] < EltNo) 4056 MaxElts[j] = EltNo; 4057 FoundSource = true; 4058 break; 4059 } 4060 } 4061 4062 // Or record a new source if not... 4063 if (!FoundSource) { 4064 SourceVecs.push_back(SourceVec); 4065 MinElts.push_back(EltNo); 4066 MaxElts.push_back(EltNo); 4067 } 4068 } 4069 4070 // Currently only do something sane when at most two source vectors 4071 // involved. 4072 if (SourceVecs.size() > 2) 4073 return SDValue(); 4074 4075 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4076 int VEXTOffsets[2] = {0, 0}; 4077 4078 // This loop extracts the usage patterns of the source vectors 4079 // and prepares appropriate SDValues for a shuffle if possible. 4080 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4081 if (SourceVecs[i].getValueType() == VT) { 4082 // No VEXT necessary 4083 ShuffleSrcs[i] = SourceVecs[i]; 4084 VEXTOffsets[i] = 0; 4085 continue; 4086 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4087 // It probably isn't worth padding out a smaller vector just to 4088 // break it down again in a shuffle. 4089 return SDValue(); 4090 } 4091 4092 // Since only 64-bit and 128-bit vectors are legal on ARM and 4093 // we've eliminated the other cases... 
4094 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4095 "unexpected vector sizes in ReconstructShuffle"); 4096 4097 if (MaxElts[i] - MinElts[i] >= NumElts) { 4098 // Span too large for a VEXT to cope 4099 return SDValue(); 4100 } 4101 4102 if (MinElts[i] >= NumElts) { 4103 // The extraction can just take the second half 4104 VEXTOffsets[i] = NumElts; 4105 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4106 SourceVecs[i], 4107 DAG.getIntPtrConstant(NumElts)); 4108 } else if (MaxElts[i] < NumElts) { 4109 // The extraction can just take the first half 4110 VEXTOffsets[i] = 0; 4111 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4112 SourceVecs[i], 4113 DAG.getIntPtrConstant(0)); 4114 } else { 4115 // An actual VEXT is needed 4116 VEXTOffsets[i] = MinElts[i]; 4117 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4118 SourceVecs[i], 4119 DAG.getIntPtrConstant(0)); 4120 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4121 SourceVecs[i], 4122 DAG.getIntPtrConstant(NumElts)); 4123 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4124 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4125 } 4126 } 4127 4128 SmallVector<int, 8> Mask; 4129 4130 for (unsigned i = 0; i < NumElts; ++i) { 4131 SDValue Entry = Op.getOperand(i); 4132 if (Entry.getOpcode() == ISD::UNDEF) { 4133 Mask.push_back(-1); 4134 continue; 4135 } 4136 4137 SDValue ExtractVec = Entry.getOperand(0); 4138 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4139 .getOperand(1))->getSExtValue(); 4140 if (ExtractVec == SourceVecs[0]) { 4141 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4142 } else { 4143 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4144 } 4145 } 4146 4147 // Final check before we try to produce nonsense... 4148 if (isShuffleMaskLegal(Mask, VT)) 4149 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4150 &Mask[0]); 4151 4152 return SDValue(); 4153} 4154 4155/// isShuffleMaskLegal - Targets can use this to indicate that they only 4156/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4157/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4158/// are assumed to be legal. 4159bool 4160ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4161 EVT VT) const { 4162 if (VT.getVectorNumElements() == 4 && 4163 (VT.is128BitVector() || VT.is64BitVector())) { 4164 unsigned PFIndexes[4]; 4165 for (unsigned i = 0; i != 4; ++i) { 4166 if (M[i] < 0) 4167 PFIndexes[i] = 8; 4168 else 4169 PFIndexes[i] = M[i]; 4170 } 4171 4172 // Compute the index in the perfect shuffle table. 
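    // Each index is mapped into the range [0, 8] (8 meaning undef), so the
    // four indices form a base-9 number; e.g. the mask <1, u, 2, 3> maps to
    // 1*729 + 8*81 + 2*9 + 3 = 1398.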
4173 unsigned PFTableIndex = 4174 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4175 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4176 unsigned Cost = (PFEntry >> 30); 4177 4178 if (Cost <= 4) 4179 return true; 4180 } 4181 4182 bool ReverseVEXT; 4183 unsigned Imm, WhichResult; 4184 4185 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4186 return (EltSize >= 32 || 4187 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4188 isVREVMask(M, VT, 64) || 4189 isVREVMask(M, VT, 32) || 4190 isVREVMask(M, VT, 16) || 4191 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4192 isVTBLMask(M, VT) || 4193 isVTRNMask(M, VT, WhichResult) || 4194 isVUZPMask(M, VT, WhichResult) || 4195 isVZIPMask(M, VT, WhichResult) || 4196 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4197 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4198 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4199} 4200 4201/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4202/// the specified operations to build the shuffle. 4203static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4204 SDValue RHS, SelectionDAG &DAG, 4205 DebugLoc dl) { 4206 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4207 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4208 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4209 4210 enum { 4211 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4212 OP_VREV, 4213 OP_VDUP0, 4214 OP_VDUP1, 4215 OP_VDUP2, 4216 OP_VDUP3, 4217 OP_VEXT1, 4218 OP_VEXT2, 4219 OP_VEXT3, 4220 OP_VUZPL, // VUZP, left result 4221 OP_VUZPR, // VUZP, right result 4222 OP_VZIPL, // VZIP, left result 4223 OP_VZIPR, // VZIP, right result 4224 OP_VTRNL, // VTRN, left result 4225 OP_VTRNR // VTRN, right result 4226 }; 4227 4228 if (OpNum == OP_COPY) { 4229 if (LHSID == (1*9+2)*9+3) return LHS; 4230 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4231 return RHS; 4232 } 4233 4234 SDValue OpLHS, OpRHS; 4235 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4236 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4237 EVT VT = OpLHS.getValueType(); 4238 4239 switch (OpNum) { 4240 default: llvm_unreachable("Unknown shuffle opcode!"); 4241 case OP_VREV: 4242 // VREV divides the vector in half and swaps within the half. 
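    // e.g. VREV64.32 on a <4 x i32> swaps lanes 0<->1 and 2<->3.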
4243 if (VT.getVectorElementType() == MVT::i32 || 4244 VT.getVectorElementType() == MVT::f32) 4245 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4246 // vrev <4 x i16> -> VREV32 4247 if (VT.getVectorElementType() == MVT::i16) 4248 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4249 // vrev <4 x i8> -> VREV16 4250 assert(VT.getVectorElementType() == MVT::i8); 4251 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4252 case OP_VDUP0: 4253 case OP_VDUP1: 4254 case OP_VDUP2: 4255 case OP_VDUP3: 4256 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4257 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4258 case OP_VEXT1: 4259 case OP_VEXT2: 4260 case OP_VEXT3: 4261 return DAG.getNode(ARMISD::VEXT, dl, VT, 4262 OpLHS, OpRHS, 4263 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4264 case OP_VUZPL: 4265 case OP_VUZPR: 4266 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4267 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4268 case OP_VZIPL: 4269 case OP_VZIPR: 4270 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4271 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4272 case OP_VTRNL: 4273 case OP_VTRNR: 4274 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4275 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4276 } 4277} 4278 4279static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4280 SmallVectorImpl<int> &ShuffleMask, 4281 SelectionDAG &DAG) { 4282 // Check to see if we can use the VTBL instruction. 4283 SDValue V1 = Op.getOperand(0); 4284 SDValue V2 = Op.getOperand(1); 4285 DebugLoc DL = Op.getDebugLoc(); 4286 4287 SmallVector<SDValue, 8> VTBLMask; 4288 for (SmallVectorImpl<int>::iterator 4289 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4290 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4291 4292 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4293 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4294 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4295 &VTBLMask[0], 8)); 4296 4297 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4298 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4299 &VTBLMask[0], 8)); 4300} 4301 4302static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4303 SDValue V1 = Op.getOperand(0); 4304 SDValue V2 = Op.getOperand(1); 4305 DebugLoc dl = Op.getDebugLoc(); 4306 EVT VT = Op.getValueType(); 4307 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4308 SmallVector<int, 8> ShuffleMask; 4309 4310 // Convert shuffles that are directly supported on NEON to target-specific 4311 // DAG nodes, instead of keeping them as shuffles and matching them again 4312 // during code selection. This is more efficient and avoids the possibility 4313 // of inconsistencies between legalization and selection. 4314 // FIXME: floating-point vectors should be canonicalized to integer vectors 4315 // of the same size so that they get CSEd properly. 4316 SVN->getMask(ShuffleMask); 4317 4318 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4319 if (EltSize <= 32) { 4320 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4321 int Lane = SVN->getSplatIndex(); 4322 // If this is an undef splat, generate it via "just" vdup, if possible.
4323 if (Lane == -1) Lane = 0; 4324 4325 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4326 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4327 } 4328 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4329 DAG.getConstant(Lane, MVT::i32)); 4330 } 4331 4332 bool ReverseVEXT; 4333 unsigned Imm; 4334 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4335 if (ReverseVEXT) 4336 std::swap(V1, V2); 4337 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4338 DAG.getConstant(Imm, MVT::i32)); 4339 } 4340 4341 if (isVREVMask(ShuffleMask, VT, 64)) 4342 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4343 if (isVREVMask(ShuffleMask, VT, 32)) 4344 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4345 if (isVREVMask(ShuffleMask, VT, 16)) 4346 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4347 4348 // Check for Neon shuffles that modify both input vectors in place. 4349 // If both results are used, i.e., if there are two shuffles with the same 4350 // source operands and with masks corresponding to both results of one of 4351 // these operations, DAG memoization will ensure that a single node is 4352 // used for both shuffles. 4353 unsigned WhichResult; 4354 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4355 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4356 V1, V2).getValue(WhichResult); 4357 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4358 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4359 V1, V2).getValue(WhichResult); 4360 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4361 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4362 V1, V2).getValue(WhichResult); 4363 4364 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4365 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4366 V1, V1).getValue(WhichResult); 4367 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4368 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4369 V1, V1).getValue(WhichResult); 4370 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4371 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4372 V1, V1).getValue(WhichResult); 4373 } 4374 4375 // If the shuffle is not directly supported and it has 4 elements, use 4376 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4377 unsigned NumElts = VT.getVectorNumElements(); 4378 if (NumElts == 4) { 4379 unsigned PFIndexes[4]; 4380 for (unsigned i = 0; i != 4; ++i) { 4381 if (ShuffleMask[i] < 0) 4382 PFIndexes[i] = 8; 4383 else 4384 PFIndexes[i] = ShuffleMask[i]; 4385 } 4386 4387 // Compute the index in the perfect shuffle table. 4388 unsigned PFTableIndex = 4389 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4390 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4391 unsigned Cost = (PFEntry >> 30); 4392 4393 if (Cost <= 4) 4394 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4395 } 4396 4397 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4398 if (EltSize >= 32) { 4399 // Do the expansion with floating-point types, since that is what the VFP 4400 // registers are defined to use, and since i64 is not legal. 
4401 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4402 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4403 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4404 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4405 SmallVector<SDValue, 8> Ops; 4406 for (unsigned i = 0; i < NumElts; ++i) { 4407 if (ShuffleMask[i] < 0) 4408 Ops.push_back(DAG.getUNDEF(EltVT)); 4409 else 4410 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4411 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4412 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4413 MVT::i32))); 4414 } 4415 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4416 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4417 } 4418 4419 if (VT == MVT::v8i8) { 4420 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4421 if (NewOp.getNode()) 4422 return NewOp; 4423 } 4424 4425 return SDValue(); 4426} 4427 4428static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4429 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4430 SDValue Lane = Op.getOperand(1); 4431 if (!isa<ConstantSDNode>(Lane)) 4432 return SDValue(); 4433 4434 SDValue Vec = Op.getOperand(0); 4435 if (Op.getValueType() == MVT::i32 && 4436 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4437 DebugLoc dl = Op.getDebugLoc(); 4438 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4439 } 4440 4441 return Op; 4442} 4443 4444static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4445 // The only time a CONCAT_VECTORS operation can have legal types is when 4446 // two 64-bit vectors are concatenated to a 128-bit vector. 4447 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4448 "unexpected CONCAT_VECTORS"); 4449 DebugLoc dl = Op.getDebugLoc(); 4450 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4451 SDValue Op0 = Op.getOperand(0); 4452 SDValue Op1 = Op.getOperand(1); 4453 if (Op0.getOpcode() != ISD::UNDEF) 4454 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4455 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4456 DAG.getIntPtrConstant(0)); 4457 if (Op1.getOpcode() != ISD::UNDEF) 4458 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4459 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4460 DAG.getIntPtrConstant(1)); 4461 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4462} 4463 4464/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4465/// element has been zero/sign-extended, depending on the isSigned parameter, 4466/// from an integer type half its size. 4467static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4468 bool isSigned) { 4469 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4470 EVT VT = N->getValueType(0); 4471 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4472 SDNode *BVN = N->getOperand(0).getNode(); 4473 if (BVN->getValueType(0) != MVT::v4i32 || 4474 BVN->getOpcode() != ISD::BUILD_VECTOR) 4475 return false; 4476 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4477 unsigned HiElt = 1 - LoElt; 4478 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4479 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4480 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4481 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4482 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4483 return false; 4484 if (isSigned) { 4485 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4486 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4487 return true; 4488 } else { 4489 if (Hi0->isNullValue() && Hi1->isNullValue()) 4490 return true; 4491 } 4492 return false; 4493 } 4494 4495 if (N->getOpcode() != ISD::BUILD_VECTOR) 4496 return false; 4497 4498 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4499 SDNode *Elt = N->getOperand(i).getNode(); 4500 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4501 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4502 unsigned HalfSize = EltSize / 2; 4503 if (isSigned) { 4504 int64_t SExtVal = C->getSExtValue(); 4505 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4506 return false; 4507 } else { 4508 if ((C->getZExtValue() >> HalfSize) != 0) 4509 return false; 4510 } 4511 continue; 4512 } 4513 return false; 4514 } 4515 4516 return true; 4517} 4518 4519/// isSignExtended - Check if a node is a vector value that is sign-extended 4520/// or a constant BUILD_VECTOR with sign-extended elements. 4521static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4522 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4523 return true; 4524 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4525 return true; 4526 return false; 4527} 4528 4529/// isZeroExtended - Check if a node is a vector value that is zero-extended 4530/// or a constant BUILD_VECTOR with zero-extended elements. 4531static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4532 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4533 return true; 4534 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4535 return true; 4536 return false; 4537} 4538 4539/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4540/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4541static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4542 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4543 return N->getOperand(0); 4544 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4545 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4546 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4547 LD->isNonTemporal(), LD->getAlignment()); 4548 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4549 // have been legalized as a BITCAST from v4i32. 4550 if (N->getOpcode() == ISD::BITCAST) { 4551 SDNode *BVN = N->getOperand(0).getNode(); 4552 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4553 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4554 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4555 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4556 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4557 } 4558 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
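  // For example (illustrative only): a (v4i32 BUILD_VECTOR 1, 2, 3, 4) whose
  // elements were already checked by isExtendedBUILD_VECTOR to fit in 16 bits
  // is rebuilt as (v4i16 BUILD_VECTOR 1, 2, 3, 4), i.e. a legal narrow operand
  // for VMULL.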
4559 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4560 EVT VT = N->getValueType(0); 4561 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4562 unsigned NumElts = VT.getVectorNumElements(); 4563 MVT TruncVT = MVT::getIntegerVT(EltSize); 4564 SmallVector<SDValue, 8> Ops; 4565 for (unsigned i = 0; i != NumElts; ++i) { 4566 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4567 const APInt &CInt = C->getAPIntValue(); 4568 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4569 } 4570 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4571 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4572} 4573 4574static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4575 unsigned Opcode = N->getOpcode(); 4576 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4577 SDNode *N0 = N->getOperand(0).getNode(); 4578 SDNode *N1 = N->getOperand(1).getNode(); 4579 return N0->hasOneUse() && N1->hasOneUse() && 4580 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4581 } 4582 return false; 4583} 4584 4585static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4586 unsigned Opcode = N->getOpcode(); 4587 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4588 SDNode *N0 = N->getOperand(0).getNode(); 4589 SDNode *N1 = N->getOperand(1).getNode(); 4590 return N0->hasOneUse() && N1->hasOneUse() && 4591 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4592 } 4593 return false; 4594} 4595 4596static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4597 // Multiplications are only custom-lowered for 128-bit vectors so that 4598 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4599 EVT VT = Op.getValueType(); 4600 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4601 SDNode *N0 = Op.getOperand(0).getNode(); 4602 SDNode *N1 = Op.getOperand(1).getNode(); 4603 unsigned NewOpc = 0; 4604 bool isMLA = false; 4605 bool isN0SExt = isSignExtended(N0, DAG); 4606 bool isN1SExt = isSignExtended(N1, DAG); 4607 if (isN0SExt && isN1SExt) 4608 NewOpc = ARMISD::VMULLs; 4609 else { 4610 bool isN0ZExt = isZeroExtended(N0, DAG); 4611 bool isN1ZExt = isZeroExtended(N1, DAG); 4612 if (isN0ZExt && isN1ZExt) 4613 NewOpc = ARMISD::VMULLu; 4614 else if (isN1SExt || isN1ZExt) { 4615 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4616 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4617 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4618 NewOpc = ARMISD::VMULLs; 4619 isMLA = true; 4620 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4621 NewOpc = ARMISD::VMULLu; 4622 isMLA = true; 4623 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4624 std::swap(N0, N1); 4625 NewOpc = ARMISD::VMULLu; 4626 isMLA = true; 4627 } 4628 } 4629 4630 if (!NewOpc) { 4631 if (VT == MVT::v2i64) 4632 // Fall through to expand this. It is not legal. 4633 return SDValue(); 4634 else 4635 // Other vector multiplications are legal. 4636 return Op; 4637 } 4638 } 4639 4640 // Legalize to a VMULL instruction. 4641 DebugLoc DL = Op.getDebugLoc(); 4642 SDValue Op0; 4643 SDValue Op1 = SkipExtension(N1, DAG); 4644 if (!isMLA) { 4645 Op0 = SkipExtension(N0, DAG); 4646 assert(Op0.getValueType().is64BitVector() && 4647 Op1.getValueType().is64BitVector() && 4648 "unexpected types for extended operands to VMULL"); 4649 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4650 } 4651 4652 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4653 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4654 // vmull q0, d4, d6 4655 // vmlal q0, d5, d6 4656 // is faster than 4657 // vaddl q0, d4, d5 4658 // vmovl q1, d6 4659 // vmul q0, q0, q1 4660 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4661 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4662 EVT Op1VT = Op1.getValueType(); 4663 return DAG.getNode(N0->getOpcode(), DL, VT, 4664 DAG.getNode(NewOpc, DL, VT, 4665 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4666 DAG.getNode(NewOpc, DL, VT, 4667 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4668} 4669 4670static SDValue 4671LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4672 // Convert to float 4673 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4674 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4675 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4676 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4677 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4678 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4679 // Get reciprocal estimate. 4680 // float4 recip = vrecpeq_f32(yf); 4681 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4682 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4683 // Because char has a smaller range than uchar, we can actually get away 4684 // without any newton steps. This requires that we use a weird bias 4685 // of 0xb000, however (again, this has been exhaustively tested). 4686 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4687 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4688 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4689 Y = DAG.getConstant(0xb000, MVT::i32); 4690 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4691 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4692 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4693 // Convert back to short. 4694 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4695 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4696 return X; 4697} 4698 4699static SDValue 4700LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4701 SDValue N2; 4702 // Convert to float. 4703 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4704 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4705 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4706 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4707 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4708 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4709 4710 // Use reciprocal estimate and one refinement step. 4711 // float4 recip = vrecpeq_f32(yf); 4712 // recip *= vrecpsq_f32(yf, recip); 4713 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4714 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4715 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4716 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4717 N1, N2); 4718 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4719 // Because short has a smaller range than ushort, we can actually get away 4720 // with only a single newton step. This requires that we use a weird bias 4721 // of 89, however (again, this has been exhaustively tested). 
4722 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4723 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4724 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4725 N1 = DAG.getConstant(0x89, MVT::i32); 4726 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4727 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4728 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4729 // Convert back to integer and return. 4730 // return vmovn_s32(vcvt_s32_f32(result)); 4731 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4732 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4733 return N0; 4734} 4735 4736static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4737 EVT VT = Op.getValueType(); 4738 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4739 "unexpected type for custom-lowering ISD::SDIV"); 4740 4741 DebugLoc dl = Op.getDebugLoc(); 4742 SDValue N0 = Op.getOperand(0); 4743 SDValue N1 = Op.getOperand(1); 4744 SDValue N2, N3; 4745 4746 if (VT == MVT::v8i8) { 4747 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4748 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4749 4750 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4751 DAG.getIntPtrConstant(4)); 4752 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4753 DAG.getIntPtrConstant(4)); 4754 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4755 DAG.getIntPtrConstant(0)); 4756 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4757 DAG.getIntPtrConstant(0)); 4758 4759 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4760 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4761 4762 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4763 N0 = LowerCONCAT_VECTORS(N0, DAG); 4764 4765 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4766 return N0; 4767 } 4768 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4769} 4770 4771static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4772 EVT VT = Op.getValueType(); 4773 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4774 "unexpected type for custom-lowering ISD::UDIV"); 4775 4776 DebugLoc dl = Op.getDebugLoc(); 4777 SDValue N0 = Op.getOperand(0); 4778 SDValue N1 = Op.getOperand(1); 4779 SDValue N2, N3; 4780 4781 if (VT == MVT::v8i8) { 4782 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4783 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4784 4785 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4786 DAG.getIntPtrConstant(4)); 4787 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4788 DAG.getIntPtrConstant(4)); 4789 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4790 DAG.getIntPtrConstant(0)); 4791 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4792 DAG.getIntPtrConstant(0)); 4793 4794 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4795 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4796 4797 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4798 N0 = LowerCONCAT_VECTORS(N0, DAG); 4799 4800 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4801 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4802 N0); 4803 return N0; 4804 } 4805 4806 // v4i16 sdiv ... Convert to float. 
4807 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 4808 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 4809 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 4810 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 4811 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4812 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4813 4814 // Use reciprocal estimate and two refinement steps. 4815 // float4 recip = vrecpeq_f32(yf); 4816 // recip *= vrecpsq_f32(yf, recip); 4817 // recip *= vrecpsq_f32(yf, recip); 4818 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4819 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1); 4820 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4821 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4822 BN1, N2); 4823 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4824 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4825 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4826 BN1, N2); 4827 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4828 // Simply multiplying by the reciprocal estimate can leave us a few ulps 4829 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 4830 // and that it will never cause us to return an answer too large). 4831 // float4 result = as_float4(as_int4(xf*recip) + 2); 4832 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4833 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4834 N1 = DAG.getConstant(2, MVT::i32); 4835 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4836 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4837 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4838 // Convert back to integer and return. 4839 // return vmovn_u32(vcvt_s32_f32(result)); 4840 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4841 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4842 return N0; 4843} 4844 4845 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 4846 EVT VT = Op.getNode()->getValueType(0); 4847 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 4848 4849 unsigned Opc; 4850 bool ExtraOp = false; 4851 switch (Op.getOpcode()) { 4852 default: assert(0 && "Invalid code"); 4853 case ISD::ADDC: Opc = ARMISD::ADDC; break; 4854 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 4855 case ISD::SUBC: Opc = ARMISD::SUBC; break; 4856 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 4857 } 4858 4859 if (!ExtraOp) 4860 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 4861 Op.getOperand(1)); 4862 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 4863 Op.getOperand(1), Op.getOperand(2)); 4864} 4865 4866 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 4867 // Monotonic load/store is legal for all targets 4868 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 4869 return Op; 4870 4871 // Acquire/Release load/store is not legal for targets without a 4872 // dmb or equivalent available.
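  // Returning a null SDValue below asks the legalizer to use its default
  // expansion instead. As a rough illustration (assuming an ARMv7-class
  // target with DMB): an acquire load lowers to "ldr; dmb ish" and a release
  // store to "dmb ish; str"; without a barrier instruction there is no cheap
  // way to get that ordering.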
4873 return SDValue(); 4874 } 4875 4876 4877 static void 4878 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, 4879 SelectionDAG &DAG, unsigned NewOp) { 4880 EVT T = Node->getValueType(0); 4881 DebugLoc dl = Node->getDebugLoc(); 4882 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 4883 4884 SmallVector<SDValue, 6> Ops; 4885 Ops.push_back(Node->getOperand(0)); // Chain 4886 Ops.push_back(Node->getOperand(1)); // Ptr 4887 // Low part of Val1 4888 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4889 Node->getOperand(2), DAG.getIntPtrConstant(0))); 4890 // High part of Val1 4891 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4892 Node->getOperand(2), DAG.getIntPtrConstant(1))); 4893 if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) { 4894 // Low part of Val2 4895 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4896 Node->getOperand(3), DAG.getIntPtrConstant(0))); 4897 // High part of Val2 4898 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4899 Node->getOperand(3), DAG.getIntPtrConstant(1))); 4900 } 4901 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4902 SDValue Result = 4903 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64, 4904 cast<MemSDNode>(Node)->getMemOperand()); 4905 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) }; 4906 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 4907 Results.push_back(Result.getValue(2)); 4908} 4909 4910 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4911 switch (Op.getOpcode()) { 4912 default: llvm_unreachable("Don't know how to custom lower this!"); 4913 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4914 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4915 case ISD::GlobalAddress: 4916 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4917 LowerGlobalAddressELF(Op, DAG); 4918 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4919 case ISD::SELECT: return LowerSELECT(Op, DAG); 4920 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4921 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4922 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4923 case ISD::VASTART: return LowerVASTART(Op, DAG); 4924 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4925 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 4926 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4927 case ISD::SINT_TO_FP: 4928 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4929 case ISD::FP_TO_SINT: 4930 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4931 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4932 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4933 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4934 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4935 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4936 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4937 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4938 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4939 Subtarget); 4940 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4941 case ISD::SHL: 4942 case ISD::SRL: 4943 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4944 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4945 case ISD::SRL_PARTS: 4946 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4947 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4948 case ISD::SETCC: return LowerVSETCC(Op, DAG); 4949 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4950 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4951 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4952 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4953 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4954 case ISD::MUL: return LowerMUL(Op, DAG); 4955 case ISD::SDIV: return LowerSDIV(Op, DAG); 4956 case ISD::UDIV: return LowerUDIV(Op, DAG); 4957 case ISD::ADDC: 4958 case ISD::ADDE: 4959 case ISD::SUBC: 4960 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 4961 case ISD::ATOMIC_LOAD: 4962 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 4963 } 4964 return SDValue(); 4965} 4966 4967/// ReplaceNodeResults - Replace the results of node with an illegal result 4968/// type with new values built out of custom code. 
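/// As a sketch of the most common case here (illustrative, not additional
/// behaviour): an illegal i64 atomicrmw such as
///   %r = atomicrmw add i64* %p, i64 %v seq_cst
/// is rebuilt by ReplaceATOMIC_OP_64 as an ARMISD::ATOMADD64_DAG node that
/// produces two i32 results plus a chain, and the halves are reassembled with
/// ISD::BUILD_PAIR.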
4969void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4970 SmallVectorImpl<SDValue>&Results, 4971 SelectionDAG &DAG) const { 4972 SDValue Res; 4973 switch (N->getOpcode()) { 4974 default: 4975 llvm_unreachable("Don't know how to custom expand this!"); 4976 break; 4977 case ISD::BITCAST: 4978 Res = ExpandBITCAST(N, DAG); 4979 break; 4980 case ISD::SRL: 4981 case ISD::SRA: 4982 Res = Expand64BitShift(N, DAG, Subtarget); 4983 break; 4984 case ISD::ATOMIC_LOAD_ADD: 4985 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 4986 return; 4987 case ISD::ATOMIC_LOAD_AND: 4988 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 4989 return; 4990 case ISD::ATOMIC_LOAD_NAND: 4991 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 4992 return; 4993 case ISD::ATOMIC_LOAD_OR: 4994 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 4995 return; 4996 case ISD::ATOMIC_LOAD_SUB: 4997 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 4998 return; 4999 case ISD::ATOMIC_LOAD_XOR: 5000 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5001 return; 5002 case ISD::ATOMIC_SWAP: 5003 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5004 return; 5005 case ISD::ATOMIC_CMP_SWAP: 5006 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5007 return; 5008 } 5009 if (Res.getNode()) 5010 Results.push_back(Res); 5011} 5012 5013//===----------------------------------------------------------------------===// 5014// ARM Scheduler Hooks 5015//===----------------------------------------------------------------------===// 5016 5017MachineBasicBlock * 5018ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5019 MachineBasicBlock *BB, 5020 unsigned Size) const { 5021 unsigned dest = MI->getOperand(0).getReg(); 5022 unsigned ptr = MI->getOperand(1).getReg(); 5023 unsigned oldval = MI->getOperand(2).getReg(); 5024 unsigned newval = MI->getOperand(3).getReg(); 5025 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5026 DebugLoc dl = MI->getDebugLoc(); 5027 bool isThumb2 = Subtarget->isThumb2(); 5028 5029 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5030 unsigned scratch = 5031 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 5032 : ARM::GPRRegisterClass); 5033 5034 if (isThumb2) { 5035 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5036 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 5037 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 5038 } 5039 5040 unsigned ldrOpc, strOpc; 5041 switch (Size) { 5042 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5043 case 1: 5044 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5045 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5046 break; 5047 case 2: 5048 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5049 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5050 break; 5051 case 4: 5052 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5053 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5054 break; 5055 } 5056 5057 MachineFunction *MF = BB->getParent(); 5058 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5059 MachineFunction::iterator It = BB; 5060 ++It; // insert the new blocks after the current block 5061 5062 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5063 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5064 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5065 MF->insert(It, loop1MBB); 5066 MF->insert(It, loop2MBB); 5067 MF->insert(It, exitMBB); 5068 5069 // Transfer the remainder of BB and its successor edges to exitMBB. 5070 exitMBB->splice(exitMBB->begin(), BB, 5071 llvm::next(MachineBasicBlock::iterator(MI)), 5072 BB->end()); 5073 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5074 5075 // thisMBB: 5076 // ... 5077 // fallthrough --> loop1MBB 5078 BB->addSuccessor(loop1MBB); 5079 5080 // loop1MBB: 5081 // ldrex dest, [ptr] 5082 // cmp dest, oldval 5083 // bne exitMBB 5084 BB = loop1MBB; 5085 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5086 if (ldrOpc == ARM::t2LDREX) 5087 MIB.addImm(0); 5088 AddDefaultPred(MIB); 5089 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5090 .addReg(dest).addReg(oldval)); 5091 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5092 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5093 BB->addSuccessor(loop2MBB); 5094 BB->addSuccessor(exitMBB); 5095 5096 // loop2MBB: 5097 // strex scratch, newval, [ptr] 5098 // cmp scratch, #0 5099 // bne loop1MBB 5100 BB = loop2MBB; 5101 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5102 if (strOpc == ARM::t2STREX) 5103 MIB.addImm(0); 5104 AddDefaultPred(MIB); 5105 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5106 .addReg(scratch).addImm(0)); 5107 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5108 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5109 BB->addSuccessor(loop1MBB); 5110 BB->addSuccessor(exitMBB); 5111 5112 // exitMBB: 5113 // ... 5114 BB = exitMBB; 5115 5116 MI->eraseFromParent(); // The instruction is gone now. 5117 5118 return BB; 5119} 5120 5121MachineBasicBlock * 5122ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5123 unsigned Size, unsigned BinOpcode) const { 5124 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5125 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5126 5127 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5128 MachineFunction *MF = BB->getParent(); 5129 MachineFunction::iterator It = BB; 5130 ++It; 5131 5132 unsigned dest = MI->getOperand(0).getReg(); 5133 unsigned ptr = MI->getOperand(1).getReg(); 5134 unsigned incr = MI->getOperand(2).getReg(); 5135 DebugLoc dl = MI->getDebugLoc(); 5136 bool isThumb2 = Subtarget->isThumb2(); 5137 5138 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5139 if (isThumb2) { 5140 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5141 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5142 } 5143 5144 unsigned ldrOpc, strOpc; 5145 switch (Size) { 5146 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5147 case 1: 5148 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5149 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5150 break; 5151 case 2: 5152 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5153 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5154 break; 5155 case 4: 5156 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5157 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5158 break; 5159 } 5160 5161 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5162 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5163 MF->insert(It, loopMBB); 5164 MF->insert(It, exitMBB); 5165 5166 // Transfer the remainder of BB and its successor edges to exitMBB. 5167 exitMBB->splice(exitMBB->begin(), BB, 5168 llvm::next(MachineBasicBlock::iterator(MI)), 5169 BB->end()); 5170 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5171 5172 TargetRegisterClass *TRC = 5173 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5174 unsigned scratch = MRI.createVirtualRegister(TRC); 5175 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5176 5177 // thisMBB: 5178 // ... 5179 // fallthrough --> loopMBB 5180 BB->addSuccessor(loopMBB); 5181 5182 // loopMBB: 5183 // ldrex dest, ptr 5184 // <binop> scratch2, dest, incr 5185 // strex scratch, scratch2, ptr 5186 // cmp scratch, #0 5187 // bne- loopMBB 5188 // fallthrough --> exitMBB 5189 BB = loopMBB; 5190 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5191 if (ldrOpc == ARM::t2LDREX) 5192 MIB.addImm(0); 5193 AddDefaultPred(MIB); 5194 if (BinOpcode) { 5195 // operand order needs to go the other way for NAND 5196 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5197 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5198 addReg(incr).addReg(dest)).addReg(0); 5199 else 5200 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5201 addReg(dest).addReg(incr)).addReg(0); 5202 } 5203 5204 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5205 if (strOpc == ARM::t2STREX) 5206 MIB.addImm(0); 5207 AddDefaultPred(MIB); 5208 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5209 .addReg(scratch).addImm(0)); 5210 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5211 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5212 5213 BB->addSuccessor(loopMBB); 5214 BB->addSuccessor(exitMBB); 5215 5216 // exitMBB: 5217 // ... 5218 BB = exitMBB; 5219 5220 MI->eraseFromParent(); // The instruction is gone now. 5221 5222 return BB; 5223} 5224 5225MachineBasicBlock * 5226ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5227 MachineBasicBlock *BB, 5228 unsigned Size, 5229 bool signExtend, 5230 ARMCC::CondCodes Cond) const { 5231 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5232 5233 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5234 MachineFunction *MF = BB->getParent(); 5235 MachineFunction::iterator It = BB; 5236 ++It; 5237 5238 unsigned dest = MI->getOperand(0).getReg(); 5239 unsigned ptr = MI->getOperand(1).getReg(); 5240 unsigned incr = MI->getOperand(2).getReg(); 5241 unsigned oldval = dest; 5242 DebugLoc dl = MI->getDebugLoc(); 5243 bool isThumb2 = Subtarget->isThumb2(); 5244 5245 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5246 if (isThumb2) { 5247 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5248 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5249 } 5250 5251 unsigned ldrOpc, strOpc, extendOpc; 5252 switch (Size) { 5253 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5254 case 1: 5255 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5256 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5257 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 5258 break; 5259 case 2: 5260 ldrOpc = isThumb2 ? 
ARM::t2LDREXH : ARM::LDREXH; 5261 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5262 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5263 break; 5264 case 4: 5265 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5266 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5267 extendOpc = 0; 5268 break; 5269 } 5270 5271 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5272 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5273 MF->insert(It, loopMBB); 5274 MF->insert(It, exitMBB); 5275 5276 // Transfer the remainder of BB and its successor edges to exitMBB. 5277 exitMBB->splice(exitMBB->begin(), BB, 5278 llvm::next(MachineBasicBlock::iterator(MI)), 5279 BB->end()); 5280 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5281 5282 TargetRegisterClass *TRC = 5283 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5284 unsigned scratch = MRI.createVirtualRegister(TRC); 5285 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5286 5287 // thisMBB: 5288 // ... 5289 // fallthrough --> loopMBB 5290 BB->addSuccessor(loopMBB); 5291 5292 // loopMBB: 5293 // ldrex dest, ptr 5294 // (sign extend dest, if required) 5295 // cmp dest, incr 5296 // cmov.cond scratch2, dest, incr 5297 // strex scratch, scratch2, ptr 5298 // cmp scratch, #0 5299 // bne- loopMBB 5300 // fallthrough --> exitMBB 5301 BB = loopMBB; 5302 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5303 if (ldrOpc == ARM::t2LDREX) 5304 MIB.addImm(0); 5305 AddDefaultPred(MIB); 5306 5307 // Sign extend the value, if necessary. 5308 if (signExtend && extendOpc) { 5309 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5310 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5311 .addReg(dest) 5312 .addImm(0)); 5313 } 5314 5315 // Build compare and cmov instructions. 5316 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5317 .addReg(oldval).addReg(incr)); 5318 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5319 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5320 5321 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5322 if (strOpc == ARM::t2STREX) 5323 MIB.addImm(0); 5324 AddDefaultPred(MIB); 5325 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5326 .addReg(scratch).addImm(0)); 5327 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5328 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5329 5330 BB->addSuccessor(loopMBB); 5331 BB->addSuccessor(exitMBB); 5332 5333 // exitMBB: 5334 // ... 5335 BB = exitMBB; 5336 5337 MI->eraseFromParent(); // The instruction is gone now. 5338 5339 return BB; 5340} 5341 5342MachineBasicBlock * 5343ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5344 unsigned Op1, unsigned Op2, 5345 bool NeedsCarry, bool IsCmpxchg) const { 5346 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
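  // For the swap case (Op1 == 0) the loop body needs no ALU op at all: the new
  // value is simply copied into R0/R1 before the strexd. A sketch of the
  // resulting loop (registers as hardcoded below):
  //   loop: ldrexd r2, r3, ptr
  //         mov    r0, vallo
  //         mov    r1, valhi
  //         strexd storesuccess, r0, r1, ptr
  //         cmp    storesuccess, #0
  //         bne    loop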
5347 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5348 5349 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5350 MachineFunction *MF = BB->getParent(); 5351 MachineFunction::iterator It = BB; 5352 ++It; 5353 5354 unsigned destlo = MI->getOperand(0).getReg(); 5355 unsigned desthi = MI->getOperand(1).getReg(); 5356 unsigned ptr = MI->getOperand(2).getReg(); 5357 unsigned vallo = MI->getOperand(3).getReg(); 5358 unsigned valhi = MI->getOperand(4).getReg(); 5359 DebugLoc dl = MI->getDebugLoc(); 5360 bool isThumb2 = Subtarget->isThumb2(); 5361 5362 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5363 if (isThumb2) { 5364 MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); 5365 MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); 5366 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5367 } 5368 5369 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5370 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5371 5372 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5373 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5374 if (IsCmpxchg) { 5375 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5376 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5377 } 5378 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5379 MF->insert(It, loopMBB); 5380 if (IsCmpxchg) { 5381 MF->insert(It, contBB); 5382 MF->insert(It, cont2BB); 5383 } 5384 MF->insert(It, exitMBB); 5385 5386 // Transfer the remainder of BB and its successor edges to exitMBB. 5387 exitMBB->splice(exitMBB->begin(), BB, 5388 llvm::next(MachineBasicBlock::iterator(MI)), 5389 BB->end()); 5390 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5391 5392 TargetRegisterClass *TRC = 5393 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5394 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5395 5396 // thisMBB: 5397 // ... 5398 // fallthrough --> loopMBB 5399 BB->addSuccessor(loopMBB); 5400 5401 // loopMBB: 5402 // ldrexd r2, r3, ptr 5403 // <binopa> r0, r2, incr 5404 // <binopb> r1, r3, incr 5405 // strexd storesuccess, r0, r1, ptr 5406 // cmp storesuccess, #0 5407 // bne- loopMBB 5408 // fallthrough --> exitMBB 5409 // 5410 // Note that the registers are explicitly specified because there is not any 5411 // way to force the register allocator to allocate a register pair. 5412 // 5413 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5414 // need to properly enforce the restriction that the two output registers 5415 // for ldrexd must be different. 5416 BB = loopMBB; 5417 // Load 5418 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5419 .addReg(ARM::R2, RegState::Define) 5420 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5421 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5422 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5423 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5424 5425 if (IsCmpxchg) { 5426 // Add early exit 5427 for (unsigned i = 0; i < 2; i++) { 5428 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5429 ARM::CMPrr)) 5430 .addReg(i == 0 ? destlo : desthi) 5431 .addReg(i == 0 ? vallo : valhi)); 5432 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5433 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5434 BB->addSuccessor(exitMBB); 5435 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5436 BB = (i == 0 ? 
contBB : cont2BB); 5437 } 5438 5439 // Copy to physregs for strexd 5440 unsigned setlo = MI->getOperand(5).getReg(); 5441 unsigned sethi = MI->getOperand(6).getReg(); 5442 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5443 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5444 } else if (Op1) { 5445 // Perform binary operation 5446 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5447 .addReg(destlo).addReg(vallo)) 5448 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5449 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5450 .addReg(desthi).addReg(valhi)).addReg(0); 5451 } else { 5452 // Copy to physregs for strexd 5453 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5454 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5455 } 5456 5457 // Store 5458 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5459 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5460 // Cmp+jump 5461 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5462 .addReg(storesuccess).addImm(0)); 5463 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5464 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5465 5466 BB->addSuccessor(loopMBB); 5467 BB->addSuccessor(exitMBB); 5468 5469 // exitMBB: 5470 // ... 5471 BB = exitMBB; 5472 5473 MI->eraseFromParent(); // The instruction is gone now. 5474 5475 return BB; 5476} 5477 5478static 5479MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 5480 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 5481 E = MBB->succ_end(); I != E; ++I) 5482 if (*I != Succ) 5483 return *I; 5484 llvm_unreachable("Expecting a BB with two successors!"); 5485} 5486 5487MachineBasicBlock * 5488ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5489 MachineBasicBlock *BB) const { 5490 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5491 DebugLoc dl = MI->getDebugLoc(); 5492 bool isThumb2 = Subtarget->isThumb2(); 5493 switch (MI->getOpcode()) { 5494 default: { 5495 MI->dump(); 5496 llvm_unreachable("Unexpected instr type to insert"); 5497 } 5498 // The Thumb2 pre-indexed stores have the same MI operands, they just 5499 // define them differently in the .td files from the isel patterns, so 5500 // they need pseudos. 5501 case ARM::t2STR_preidx: 5502 MI->setDesc(TII->get(ARM::t2STR_PRE)); 5503 return BB; 5504 case ARM::t2STRB_preidx: 5505 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 5506 return BB; 5507 case ARM::t2STRH_preidx: 5508 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 5509 return BB; 5510 5511 case ARM::STRi_preidx: 5512 case ARM::STRBi_preidx: { 5513 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 5514 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 5515 // Decode the offset. 
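    // The single AM2 immediate packs the add/sub flag, shift and 12-bit offset
    // together. Illustrative decoding (values assumed, not taken from a real
    // instruction): getAM2Op(enc) == ARM_AM::sub and getAM2Offset(enc) == 4
    // turn into a plain signed offset of -4 for the *_PRE_IMM form built
    // below.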
5516 unsigned Offset = MI->getOperand(4).getImm(); 5517 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 5518 Offset = ARM_AM::getAM2Offset(Offset); 5519 if (isSub) 5520 Offset = -Offset; 5521 5522 MachineMemOperand *MMO = *MI->memoperands_begin(); 5523 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 5524 .addOperand(MI->getOperand(0)) // Rn_wb 5525 .addOperand(MI->getOperand(1)) // Rt 5526 .addOperand(MI->getOperand(2)) // Rn 5527 .addImm(Offset) // offset (skip GPR==zero_reg) 5528 .addOperand(MI->getOperand(5)) // pred 5529 .addOperand(MI->getOperand(6)) 5530 .addMemOperand(MMO); 5531 MI->eraseFromParent(); 5532 return BB; 5533 } 5534 case ARM::STRr_preidx: 5535 case ARM::STRBr_preidx: 5536 case ARM::STRH_preidx: { 5537 unsigned NewOpc; 5538 switch (MI->getOpcode()) { 5539 default: llvm_unreachable("unexpected opcode!"); 5540 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 5541 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 5542 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 5543 } 5544 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 5545 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 5546 MIB.addOperand(MI->getOperand(i)); 5547 MI->eraseFromParent(); 5548 return BB; 5549 } 5550 case ARM::ATOMIC_LOAD_ADD_I8: 5551 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5552 case ARM::ATOMIC_LOAD_ADD_I16: 5553 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5554 case ARM::ATOMIC_LOAD_ADD_I32: 5555 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5556 5557 case ARM::ATOMIC_LOAD_AND_I8: 5558 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5559 case ARM::ATOMIC_LOAD_AND_I16: 5560 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5561 case ARM::ATOMIC_LOAD_AND_I32: 5562 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5563 5564 case ARM::ATOMIC_LOAD_OR_I8: 5565 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5566 case ARM::ATOMIC_LOAD_OR_I16: 5567 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5568 case ARM::ATOMIC_LOAD_OR_I32: 5569 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5570 5571 case ARM::ATOMIC_LOAD_XOR_I8: 5572 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5573 case ARM::ATOMIC_LOAD_XOR_I16: 5574 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5575 case ARM::ATOMIC_LOAD_XOR_I32: 5576 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5577 5578 case ARM::ATOMIC_LOAD_NAND_I8: 5579 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5580 case ARM::ATOMIC_LOAD_NAND_I16: 5581 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5582 case ARM::ATOMIC_LOAD_NAND_I32: 5583 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5584 5585 case ARM::ATOMIC_LOAD_SUB_I8: 5586 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5587 case ARM::ATOMIC_LOAD_SUB_I16: 5588 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5589 case ARM::ATOMIC_LOAD_SUB_I32: 5590 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 5591 5592 case ARM::ATOMIC_LOAD_MIN_I8: 5593 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 5594 case ARM::ATOMIC_LOAD_MIN_I16: 5595 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 5596 case ARM::ATOMIC_LOAD_MIN_I32: 5597 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 5598 5599 case ARM::ATOMIC_LOAD_MAX_I8: 5600 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 5601 case ARM::ATOMIC_LOAD_MAX_I16: 5602 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 5603 case ARM::ATOMIC_LOAD_MAX_I32: 5604 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 5605 5606 case ARM::ATOMIC_LOAD_UMIN_I8: 5607 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 5608 case ARM::ATOMIC_LOAD_UMIN_I16: 5609 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 5610 case ARM::ATOMIC_LOAD_UMIN_I32: 5611 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 5612 5613 case ARM::ATOMIC_LOAD_UMAX_I8: 5614 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 5615 case ARM::ATOMIC_LOAD_UMAX_I16: 5616 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 5617 case ARM::ATOMIC_LOAD_UMAX_I32: 5618 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 5619 5620 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 5621 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 5622 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 5623 5624 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 5625 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 5626 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 5627 5628 5629 case ARM::ATOMADD6432: 5630 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 5631 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 5632 /*NeedsCarry*/ true); 5633 case ARM::ATOMSUB6432: 5634 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 5635 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 5636 /*NeedsCarry*/ true); 5637 case ARM::ATOMOR6432: 5638 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 5639 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5640 case ARM::ATOMXOR6432: 5641 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 5642 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5643 case ARM::ATOMAND6432: 5644 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 5645 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5646 case ARM::ATOMSWAP6432: 5647 return EmitAtomicBinary64(MI, BB, 0, 0, false); 5648 case ARM::ATOMCMPXCHG6432: 5649 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 5650 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 5651 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 5652 5653 case ARM::tMOVCCr_pseudo: { 5654 // To "insert" a SELECT_CC instruction, we actually have to insert the 5655 // diamond control-flow pattern. The incoming instruction knows the 5656 // destination vreg to set, the condition code register to branch on, the 5657 // true/false values to select between, and a branch opcode to use. 5658 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5659 MachineFunction::iterator It = BB; 5660 ++It; 5661 5662 // thisMBB: 5663 // ... 5664 // TrueVal = ... 
5665 // cmpTY ccX, r1, r2 5666 // bCC copy1MBB 5667 // fallthrough --> copy0MBB 5668 MachineBasicBlock *thisMBB = BB; 5669 MachineFunction *F = BB->getParent(); 5670 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 5671 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 5672 F->insert(It, copy0MBB); 5673 F->insert(It, sinkMBB); 5674 5675 // Transfer the remainder of BB and its successor edges to sinkMBB. 5676 sinkMBB->splice(sinkMBB->begin(), BB, 5677 llvm::next(MachineBasicBlock::iterator(MI)), 5678 BB->end()); 5679 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 5680 5681 BB->addSuccessor(copy0MBB); 5682 BB->addSuccessor(sinkMBB); 5683 5684 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 5685 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 5686 5687 // copy0MBB: 5688 // %FalseValue = ... 5689 // # fallthrough to sinkMBB 5690 BB = copy0MBB; 5691 5692 // Update machine-CFG edges 5693 BB->addSuccessor(sinkMBB); 5694 5695 // sinkMBB: 5696 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 5697 // ... 5698 BB = sinkMBB; 5699 BuildMI(*BB, BB->begin(), dl, 5700 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 5701 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 5702 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 5703 5704 MI->eraseFromParent(); // The pseudo instruction is gone now. 5705 return BB; 5706 } 5707 5708 case ARM::BCCi64: 5709 case ARM::BCCZi64: { 5710 // If there is an unconditional branch to the other successor, remove it. 5711 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 5712 5713 // Compare both parts that make up the double comparison separately for 5714 // equality. 5715 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 5716 5717 unsigned LHS1 = MI->getOperand(1).getReg(); 5718 unsigned LHS2 = MI->getOperand(2).getReg(); 5719 if (RHSisZero) { 5720 AddDefaultPred(BuildMI(BB, dl, 5721 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5722 .addReg(LHS1).addImm(0)); 5723 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5724 .addReg(LHS2).addImm(0) 5725 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5726 } else { 5727 unsigned RHS1 = MI->getOperand(3).getReg(); 5728 unsigned RHS2 = MI->getOperand(4).getReg(); 5729 AddDefaultPred(BuildMI(BB, dl, 5730 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5731 .addReg(LHS1).addReg(RHS1)); 5732 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5733 .addReg(LHS2).addReg(RHS2) 5734 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 5735 } 5736 5737 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 5738 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 5739 if (MI->getOperand(0).getImm() == ARMCC::NE) 5740 std::swap(destMBB, exitMBB); 5741 5742 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5743 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 5744 if (isThumb2) 5745 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 5746 else 5747 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 5748 5749 MI->eraseFromParent(); // The pseudo instruction is gone now. 5750 return BB; 5751 } 5752 } 5753} 5754 5755/// Generally, ARM instructions may be optionally encoded with a 's' 5756/// bit. However, some opcodes have a compact encoding that forces an implicit 5757/// 's' bit. List these exceptions here. 
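/// For example (illustrative): the pseudo t2ADDSri exists so isel can ask for
/// a flag-setting "adds" whose carry feeds a later ADC/SBC; it has no form
/// without the CPSR def, so AdjustInstrPostInstrSelection must keep the
/// optional cc_out operand wired to CPSR instead of dropping the 's' bit.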
5758static bool hasForcedCPSRDef(const MCInstrDesc &MCID) { 5759 switch (MCID.getOpcode()) { 5760 case ARM::t2ADDSri: 5761 case ARM::t2ADDSrr: 5762 case ARM::t2ADDSrs: 5763 case ARM::t2SUBSri: 5764 case ARM::t2SUBSrr: 5765 case ARM::t2SUBSrs: 5766 return true; 5767 } 5768 return false; 5769} 5770 5771void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 5772 SDNode *Node) const { 5773 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 5774 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 5775 // operand is still set to noreg. If needed, set the optional operand's 5776 // register to CPSR, and remove the redundant implicit def. 5777 5778 const MCInstrDesc &MCID = MI->getDesc(); 5779 unsigned ccOutIdx = MCID.getNumOperands() - 1; 5780 bool forcedCPSR = hasForcedCPSRDef(MCID); 5781 5782 // Any ARM instruction that sets the 's' bit should specify an optional 5783 // "cc_out" operand in the last operand position. 5784 if (!MCID.hasOptionalDef() || !MCID.OpInfo[ccOutIdx].isOptionalDef()) { 5785 assert(!forcedCPSR && "Optional cc_out operand required"); 5786 return; 5787 } 5788 // Look for an implicit def of CPSR added by MachineInstr ctor. 5789 bool definesCPSR = false; 5790 bool deadCPSR = false; 5791 for (unsigned i = MCID.getNumOperands(), e = MI->getNumOperands(); 5792 i != e; ++i) { 5793 const MachineOperand &MO = MI->getOperand(i); 5794 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 5795 definesCPSR = true; 5796 if (MO.isDead()) 5797 deadCPSR = true; 5798 MI->RemoveOperand(i); 5799 break; 5800 } 5801 } 5802 if (!definesCPSR) { 5803 assert(!forcedCPSR && "Optional cc_out operand required"); 5804 return; 5805 } 5806 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 5807 5808 // If possible, select the encoding that does not set the 's' bit. 5809 if (deadCPSR && !forcedCPSR) 5810 return; 5811 5812 MachineOperand &MO = MI->getOperand(ccOutIdx); 5813 MO.setReg(ARM::CPSR); 5814 MO.setIsDef(true); 5815 if (deadCPSR) 5816 MO.setIsDead(); 5817} 5818 5819//===----------------------------------------------------------------------===// 5820// ARM Optimization Hooks 5821//===----------------------------------------------------------------------===// 5822 5823static 5824SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 5825 TargetLowering::DAGCombinerInfo &DCI) { 5826 SelectionDAG &DAG = DCI.DAG; 5827 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5828 EVT VT = N->getValueType(0); 5829 unsigned Opc = N->getOpcode(); 5830 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 5831 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 5832 SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2); 5833 ISD::CondCode CC = ISD::SETCC_INVALID; 5834 5835 if (isSlctCC) { 5836 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 5837 } else { 5838 SDValue CCOp = Slct.getOperand(0); 5839 if (CCOp.getOpcode() == ISD::SETCC) 5840 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 5841 } 5842 5843 bool DoXform = false; 5844 bool InvCC = false; 5845 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 5846 "Bad input!"); 5847 5848 if (LHS.getOpcode() == ISD::Constant && 5849 cast<ConstantSDNode>(LHS)->isNullValue()) { 5850 DoXform = true; 5851 } else if (CC != ISD::SETCC_INVALID && 5852 RHS.getOpcode() == ISD::Constant && 5853 cast<ConstantSDNode>(RHS)->isNullValue()) { 5854 std::swap(LHS, RHS); 5855 SDValue Op0 = Slct.getOperand(0); 5856 EVT OpVT = isSlctCC ? Op0.getValueType() : 5857 Op0.getOperand(0).getValueType(); 5858 bool isInt = OpVT.isInteger(); 5859 CC = ISD::getSetCCInverse(CC, isInt); 5860 5861 if (!TLI.isCondCodeLegal(CC, OpVT)) 5862 return SDValue(); // Inverse operator isn't legal. 5863 5864 DoXform = true; 5865 InvCC = true; 5866 } 5867 5868 if (DoXform) { 5869 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 5870 if (isSlctCC) 5871 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 5872 Slct.getOperand(0), Slct.getOperand(1), CC); 5873 SDValue CCOp = Slct.getOperand(0); 5874 if (InvCC) 5875 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 5876 CCOp.getOperand(0), CCOp.getOperand(1), CC); 5877 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 5878 CCOp, OtherOp, Result); 5879 } 5880 return SDValue(); 5881} 5882 5883// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 5884// (only after legalization). 5885static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 5886 TargetLowering::DAGCombinerInfo &DCI, 5887 const ARMSubtarget *Subtarget) { 5888 5889 // Only perform optimization if after legalize, and if NEON is available. We 5890 // also expected both operands to be BUILD_VECTORs. 5891 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 5892 || N0.getOpcode() != ISD::BUILD_VECTOR 5893 || N1.getOpcode() != ISD::BUILD_VECTOR) 5894 return SDValue(); 5895 5896 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 5897 EVT VT = N->getValueType(0); 5898 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 5899 return SDValue(); 5900 5901 // Check that the vector operands are of the right form. 5902 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 5903 // operands, where N is the size of the formed vector. 5904 // Each EXTRACT_VECTOR should have the same input vector and odd or even 5905 // index such that we have a pair wise add pattern. 5906 5907 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 5908 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 5909 return SDValue(); 5910 SDValue Vec = N0->getOperand(0)->getOperand(0); 5911 SDNode *V = Vec.getNode(); 5912 unsigned nextIndex = 0; 5913 5914 // For each operands to the ADD which are BUILD_VECTORs, 5915 // check to see if each of their operands are an EXTRACT_VECTOR with 5916 // the same vector and appropriate index. 
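  // Illustrative shape of the pattern being matched (v4i16 example):
  //   N0 = BUILD_VECTOR (extract_elt vec, 0), (extract_elt vec, 2), ...
  //   N1 = BUILD_VECTOR (extract_elt vec, 1), (extract_elt vec, 3), ...
  // so N0 holds the even lanes and N1 the odd lanes of the same vector, and
  // N0 + N1 is exactly a pairwise add of vec, i.e. a vpaddl.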
5917 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 5918 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 5919 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 5920 5921 SDValue ExtVec0 = N0->getOperand(i); 5922 SDValue ExtVec1 = N1->getOperand(i); 5923 5924 // First operand is the vector, verify its the same. 5925 if (V != ExtVec0->getOperand(0).getNode() || 5926 V != ExtVec1->getOperand(0).getNode()) 5927 return SDValue(); 5928 5929 // Second is the constant, verify its correct. 5930 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 5931 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 5932 5933 // For the constant, we want to see all the even or all the odd. 5934 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 5935 || C1->getZExtValue() != nextIndex+1) 5936 return SDValue(); 5937 5938 // Increment index. 5939 nextIndex+=2; 5940 } else 5941 return SDValue(); 5942 } 5943 5944 // Create VPADDL node. 5945 SelectionDAG &DAG = DCI.DAG; 5946 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5947 5948 // Build operand list. 5949 SmallVector<SDValue, 8> Ops; 5950 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 5951 TLI.getPointerTy())); 5952 5953 // Input is the vector. 5954 Ops.push_back(Vec); 5955 5956 // Get widened type and narrowed type. 5957 MVT widenType; 5958 unsigned numElem = VT.getVectorNumElements(); 5959 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 5960 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 5961 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 5962 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 5963 default: 5964 assert(0 && "Invalid vector element type for padd optimization."); 5965 } 5966 5967 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 5968 widenType, &Ops[0], Ops.size()); 5969 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 5970} 5971 5972/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 5973/// operands N0 and N1. This is a helper for PerformADDCombine that is 5974/// called with the default operands, and if that fails, with commuted 5975/// operands. 5976static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 5977 TargetLowering::DAGCombinerInfo &DCI, 5978 const ARMSubtarget *Subtarget){ 5979 5980 // Attempt to create vpaddl for this add. 5981 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 5982 if (Result.getNode()) 5983 return Result; 5984 5985 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 5986 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 5987 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 5988 if (Result.getNode()) return Result; 5989 } 5990 return SDValue(); 5991} 5992 5993/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 5994/// 5995static SDValue PerformADDCombine(SDNode *N, 5996 TargetLowering::DAGCombinerInfo &DCI, 5997 const ARMSubtarget *Subtarget) { 5998 SDValue N0 = N->getOperand(0); 5999 SDValue N1 = N->getOperand(1); 6000 6001 // First try with the default operand order. 6002 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 6003 if (Result.getNode()) 6004 return Result; 6005 6006 // If that didn't work, try again with the operands commuted. 
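  // (ADD is commutative, so the select-based fold above may sit on either
  // operand; only the order of the operands passed to the helper changes.)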
6007 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 6008} 6009 6010/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 6011/// 6012static SDValue PerformSUBCombine(SDNode *N, 6013 TargetLowering::DAGCombinerInfo &DCI) { 6014 SDValue N0 = N->getOperand(0); 6015 SDValue N1 = N->getOperand(1); 6016 6017 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 6018 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 6019 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 6020 if (Result.getNode()) return Result; 6021 } 6022 6023 return SDValue(); 6024} 6025 6026/// PerformVMULCombine 6027/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 6028/// special multiplier accumulator forwarding. 6029/// vmul d3, d0, d2 6030/// vmla d3, d1, d2 6031/// is faster than 6032/// vadd d3, d0, d1 6033/// vmul d3, d3, d2 6034static SDValue PerformVMULCombine(SDNode *N, 6035 TargetLowering::DAGCombinerInfo &DCI, 6036 const ARMSubtarget *Subtarget) { 6037 if (!Subtarget->hasVMLxForwarding()) 6038 return SDValue(); 6039 6040 SelectionDAG &DAG = DCI.DAG; 6041 SDValue N0 = N->getOperand(0); 6042 SDValue N1 = N->getOperand(1); 6043 unsigned Opcode = N0.getOpcode(); 6044 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6045 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 6046 Opcode = N1.getOpcode(); 6047 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6048 Opcode != ISD::FADD && Opcode != ISD::FSUB) 6049 return SDValue(); 6050 std::swap(N0, N1); 6051 } 6052 6053 EVT VT = N->getValueType(0); 6054 DebugLoc DL = N->getDebugLoc(); 6055 SDValue N00 = N0->getOperand(0); 6056 SDValue N01 = N0->getOperand(1); 6057 return DAG.getNode(Opcode, DL, VT, 6058 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 6059 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 6060} 6061 6062static SDValue PerformMULCombine(SDNode *N, 6063 TargetLowering::DAGCombinerInfo &DCI, 6064 const ARMSubtarget *Subtarget) { 6065 SelectionDAG &DAG = DCI.DAG; 6066 6067 if (Subtarget->isThumb1Only()) 6068 return SDValue(); 6069 6070 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6071 return SDValue(); 6072 6073 EVT VT = N->getValueType(0); 6074 if (VT.is64BitVector() || VT.is128BitVector()) 6075 return PerformVMULCombine(N, DCI, Subtarget); 6076 if (VT != MVT::i32) 6077 return SDValue(); 6078 6079 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6080 if (!C) 6081 return SDValue(); 6082 6083 uint64_t MulAmt = C->getZExtValue(); 6084 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 6085 ShiftAmt = ShiftAmt & (32 - 1); 6086 SDValue V = N->getOperand(0); 6087 DebugLoc DL = N->getDebugLoc(); 6088 6089 SDValue Res; 6090 MulAmt >>= ShiftAmt; 6091 if (isPowerOf2_32(MulAmt - 1)) { 6092 // (mul x, 2^N + 1) => (add (shl x, N), x) 6093 Res = DAG.getNode(ISD::ADD, DL, VT, 6094 V, DAG.getNode(ISD::SHL, DL, VT, 6095 V, DAG.getConstant(Log2_32(MulAmt-1), 6096 MVT::i32))); 6097 } else if (isPowerOf2_32(MulAmt + 1)) { 6098 // (mul x, 2^N - 1) => (sub (shl x, N), x) 6099 Res = DAG.getNode(ISD::SUB, DL, VT, 6100 DAG.getNode(ISD::SHL, DL, VT, 6101 V, DAG.getConstant(Log2_32(MulAmt+1), 6102 MVT::i32)), 6103 V); 6104 } else 6105 return SDValue(); 6106 6107 if (ShiftAmt != 0) 6108 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 6109 DAG.getConstant(ShiftAmt, MVT::i32)); 6110 6111 // Do not add new nodes to DAG combiner worklist. 
6112 DCI.CombineTo(N, Res, false); 6113 return SDValue(); 6114} 6115 6116static SDValue PerformANDCombine(SDNode *N, 6117 TargetLowering::DAGCombinerInfo &DCI) { 6118 6119 // Attempt to use immediate-form VBIC 6120 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6121 DebugLoc dl = N->getDebugLoc(); 6122 EVT VT = N->getValueType(0); 6123 SelectionDAG &DAG = DCI.DAG; 6124 6125 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6126 return SDValue(); 6127 6128 APInt SplatBits, SplatUndef; 6129 unsigned SplatBitSize; 6130 bool HasAnyUndefs; 6131 if (BVN && 6132 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6133 if (SplatBitSize <= 64) { 6134 EVT VbicVT; 6135 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 6136 SplatUndef.getZExtValue(), SplatBitSize, 6137 DAG, VbicVT, VT.is128BitVector(), 6138 OtherModImm); 6139 if (Val.getNode()) { 6140 SDValue Input = 6141 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 6142 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 6143 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 6144 } 6145 } 6146 } 6147 6148 return SDValue(); 6149} 6150 6151/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 6152static SDValue PerformORCombine(SDNode *N, 6153 TargetLowering::DAGCombinerInfo &DCI, 6154 const ARMSubtarget *Subtarget) { 6155 // Attempt to use immediate-form VORR 6156 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6157 DebugLoc dl = N->getDebugLoc(); 6158 EVT VT = N->getValueType(0); 6159 SelectionDAG &DAG = DCI.DAG; 6160 6161 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6162 return SDValue(); 6163 6164 APInt SplatBits, SplatUndef; 6165 unsigned SplatBitSize; 6166 bool HasAnyUndefs; 6167 if (BVN && Subtarget->hasNEON() && 6168 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6169 if (SplatBitSize <= 64) { 6170 EVT VorrVT; 6171 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6172 SplatUndef.getZExtValue(), SplatBitSize, 6173 DAG, VorrVT, VT.is128BitVector(), 6174 OtherModImm); 6175 if (Val.getNode()) { 6176 SDValue Input = 6177 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 6178 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 6179 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 6180 } 6181 } 6182 } 6183 6184 SDValue N0 = N->getOperand(0); 6185 if (N0.getOpcode() != ISD::AND) 6186 return SDValue(); 6187 SDValue N1 = N->getOperand(1); 6188 6189 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 6190 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 6191 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 6192 APInt SplatUndef; 6193 unsigned SplatBitSize; 6194 bool HasAnyUndefs; 6195 6196 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 6197 APInt SplatBits0; 6198 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 6199 HasAnyUndefs) && !HasAnyUndefs) { 6200 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 6201 APInt SplatBits1; 6202 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 6203 HasAnyUndefs) && !HasAnyUndefs && 6204 SplatBits0 == ~SplatBits1) { 6205 // Canonicalize the vector type to make instruction selection simpler. 6206 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 6207 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 6208 N0->getOperand(1), N0->getOperand(0), 6209 N1->getOperand(0)); 6210 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 6211 } 6212 } 6213 } 6214 6215 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 6216 // reasonable. 6217 6218 // BFI is only available on V6T2+ 6219 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 6220 return SDValue(); 6221 6222 DebugLoc DL = N->getDebugLoc(); 6223 // 1) or (and A, mask), val => ARMbfi A, val, mask 6224 // iff (val & mask) == val 6225 // 6226 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6227 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 6228 // && mask == ~mask2 6229 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 6230 // && ~mask == mask2 6231 // (i.e., copy a bitfield value into another bitfield of the same width) 6232 6233 if (VT != MVT::i32) 6234 return SDValue(); 6235 6236 SDValue N00 = N0.getOperand(0); 6237 6238 // The value and the mask need to be constants so we can verify this is 6239 // actually a bitfield set. If the mask is 0xffff, we can do better 6240 // via a movt instruction, so don't use BFI in that case. 6241 SDValue MaskOp = N0.getOperand(1); 6242 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 6243 if (!MaskC) 6244 return SDValue(); 6245 unsigned Mask = MaskC->getZExtValue(); 6246 if (Mask == 0xffff) 6247 return SDValue(); 6248 SDValue Res; 6249 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 6250 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 6251 if (N1C) { 6252 unsigned Val = N1C->getZExtValue(); 6253 if ((Val & ~Mask) != Val) 6254 return SDValue(); 6255 6256 if (ARM::isBitFieldInvertedMask(Mask)) { 6257 Val >>= CountTrailingZeros_32(~Mask); 6258 6259 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 6260 DAG.getConstant(Val, MVT::i32), 6261 DAG.getConstant(Mask, MVT::i32)); 6262 6263 // Do not add new nodes to DAG combiner worklist. 6264 DCI.CombineTo(N, Res, false); 6265 return SDValue(); 6266 } 6267 } else if (N1.getOpcode() == ISD::AND) { 6268 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6269 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6270 if (!N11C) 6271 return SDValue(); 6272 unsigned Mask2 = N11C->getZExtValue(); 6273 6274 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 6275 // as is to match. 6276 if (ARM::isBitFieldInvertedMask(Mask) && 6277 (Mask == ~Mask2)) { 6278 // The pack halfword instruction works better for masks that fit it, 6279 // so use that when it's available. 6280 if (Subtarget->hasT2ExtractPack() && 6281 (Mask == 0xffff || Mask == 0xffff0000)) 6282 return SDValue(); 6283 // 2a 6284 unsigned amt = CountTrailingZeros_32(Mask2); 6285 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 6286 DAG.getConstant(amt, MVT::i32)); 6287 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 6288 DAG.getConstant(Mask, MVT::i32)); 6289 // Do not add new nodes to DAG combiner worklist. 6290 DCI.CombineTo(N, Res, false); 6291 return SDValue(); 6292 } else if (ARM::isBitFieldInvertedMask(~Mask) && 6293 (~Mask == Mask2)) { 6294 // The pack halfword instruction works better for masks that fit it, 6295 // so use that when it's available. 
6296 if (Subtarget->hasT2ExtractPack() && 6297 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 6298 return SDValue(); 6299 // 2b 6300 unsigned lsb = CountTrailingZeros_32(Mask); 6301 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 6302 DAG.getConstant(lsb, MVT::i32)); 6303 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 6304 DAG.getConstant(Mask2, MVT::i32)); 6305 // Do not add new nodes to DAG combiner worklist. 6306 DCI.CombineTo(N, Res, false); 6307 return SDValue(); 6308 } 6309 } 6310 6311 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 6312 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 6313 ARM::isBitFieldInvertedMask(~Mask)) { 6314 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 6315 // where lsb(mask) == #shamt and masked bits of B are known zero. 6316 SDValue ShAmt = N00.getOperand(1); 6317 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 6318 unsigned LSB = CountTrailingZeros_32(Mask); 6319 if (ShAmtC != LSB) 6320 return SDValue(); 6321 6322 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 6323 DAG.getConstant(~Mask, MVT::i32)); 6324 6325 // Do not add new nodes to DAG combiner worklist. 6326 DCI.CombineTo(N, Res, false); 6327 } 6328 6329 return SDValue(); 6330} 6331 6332/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 6333/// the bits being cleared by the AND are not demanded by the BFI. 6334static SDValue PerformBFICombine(SDNode *N, 6335 TargetLowering::DAGCombinerInfo &DCI) { 6336 SDValue N1 = N->getOperand(1); 6337 if (N1.getOpcode() == ISD::AND) { 6338 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6339 if (!N11C) 6340 return SDValue(); 6341 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 6342 unsigned LSB = CountTrailingZeros_32(~InvMask); 6343 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 6344 unsigned Mask = (1 << Width)-1; 6345 unsigned Mask2 = N11C->getZExtValue(); 6346 if ((Mask & (~Mask2)) == 0) 6347 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 6348 N->getOperand(0), N1.getOperand(0), 6349 N->getOperand(2)); 6350 } 6351 return SDValue(); 6352} 6353 6354/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 6355/// ARMISD::VMOVRRD. 6356static SDValue PerformVMOVRRDCombine(SDNode *N, 6357 TargetLowering::DAGCombinerInfo &DCI) { 6358 // vmovrrd(vmovdrr x, y) -> x,y 6359 SDValue InDouble = N->getOperand(0); 6360 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 6361 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 6362 6363 // vmovrrd(load f64) -> (load i32), (load i32) 6364 SDNode *InNode = InDouble.getNode(); 6365 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 6366 InNode->getValueType(0) == MVT::f64 && 6367 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 6368 !cast<LoadSDNode>(InNode)->isVolatile()) { 6369 // TODO: Should this be done for non-FrameIndex operands? 
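  // Sketch of the rewrite performed below (illustrative):
  //   vmovrrd (load f64 [fi])  ->  (load i32 [fi]), (load i32 [fi+4])
  // The second i32 load is chained after the first one, and all users of the
  // original load's output chain are redirected to the second load's chain.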
6370 LoadSDNode *LD = cast<LoadSDNode>(InNode); 6371 6372 SelectionDAG &DAG = DCI.DAG; 6373 DebugLoc DL = LD->getDebugLoc(); 6374 SDValue BasePtr = LD->getBasePtr(); 6375 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 6376 LD->getPointerInfo(), LD->isVolatile(), 6377 LD->isNonTemporal(), LD->getAlignment()); 6378 6379 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 6380 DAG.getConstant(4, MVT::i32)); 6381 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 6382 LD->getPointerInfo(), LD->isVolatile(), 6383 LD->isNonTemporal(), 6384 std::min(4U, LD->getAlignment() / 2)); 6385 6386 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 6387 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 6388 DCI.RemoveFromWorklist(LD); 6389 DAG.DeleteNode(LD); 6390 return Result; 6391 } 6392 6393 return SDValue(); 6394} 6395 6396/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 6397/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 6398static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 6399 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 6400 SDValue Op0 = N->getOperand(0); 6401 SDValue Op1 = N->getOperand(1); 6402 if (Op0.getOpcode() == ISD::BITCAST) 6403 Op0 = Op0.getOperand(0); 6404 if (Op1.getOpcode() == ISD::BITCAST) 6405 Op1 = Op1.getOperand(0); 6406 if (Op0.getOpcode() == ARMISD::VMOVRRD && 6407 Op0.getNode() == Op1.getNode() && 6408 Op0.getResNo() == 0 && Op1.getResNo() == 1) 6409 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 6410 N->getValueType(0), Op0.getOperand(0)); 6411 return SDValue(); 6412} 6413 6414/// PerformSTORECombine - Target-specific dag combine xforms for 6415/// ISD::STORE. 6416static SDValue PerformSTORECombine(SDNode *N, 6417 TargetLowering::DAGCombinerInfo &DCI) { 6418 // Bitcast an i64 store extracted from a vector to f64. 6419 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
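  // Two independent rewrites are attempted below (sketch): a store of a
  // VMOVDRR (an f64/i64 assembled from two GPRs) is split into two i32
  // stores at offsets 0 and 4, and a store of an i64 extracted from a vector
  // is turned into an f64 extract plus bitcasts so the value can be stored
  // directly from a VFP/NEON register.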
6420 StoreSDNode *St = cast<StoreSDNode>(N); 6421 SDValue StVal = St->getValue(); 6422 if (!ISD::isNormalStore(St) || St->isVolatile()) 6423 return SDValue(); 6424 6425 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 6426 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 6427 SelectionDAG &DAG = DCI.DAG; 6428 DebugLoc DL = St->getDebugLoc(); 6429 SDValue BasePtr = St->getBasePtr(); 6430 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 6431 StVal.getNode()->getOperand(0), BasePtr, 6432 St->getPointerInfo(), St->isVolatile(), 6433 St->isNonTemporal(), St->getAlignment()); 6434 6435 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 6436 DAG.getConstant(4, MVT::i32)); 6437 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 6438 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 6439 St->isNonTemporal(), 6440 std::min(4U, St->getAlignment() / 2)); 6441 } 6442 6443 if (StVal.getValueType() != MVT::i64 || 6444 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6445 return SDValue(); 6446 6447 SelectionDAG &DAG = DCI.DAG; 6448 DebugLoc dl = StVal.getDebugLoc(); 6449 SDValue IntVec = StVal.getOperand(0); 6450 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6451 IntVec.getValueType().getVectorNumElements()); 6452 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 6453 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6454 Vec, StVal.getOperand(1)); 6455 dl = N->getDebugLoc(); 6456 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 6457 // Make the DAGCombiner fold the bitcasts. 6458 DCI.AddToWorklist(Vec.getNode()); 6459 DCI.AddToWorklist(ExtElt.getNode()); 6460 DCI.AddToWorklist(V.getNode()); 6461 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 6462 St->getPointerInfo(), St->isVolatile(), 6463 St->isNonTemporal(), St->getAlignment(), 6464 St->getTBAAInfo()); 6465} 6466 6467/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 6468/// are normal, non-volatile loads. If so, it is profitable to bitcast an 6469/// i64 vector to have f64 elements, since the value can then be loaded 6470/// directly into a VFP register. 6471static bool hasNormalLoadOperand(SDNode *N) { 6472 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 6473 for (unsigned i = 0; i < NumElts; ++i) { 6474 SDNode *Elt = N->getOperand(i).getNode(); 6475 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 6476 return true; 6477 } 6478 return false; 6479} 6480 6481/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 6482/// ISD::BUILD_VECTOR. 6483static SDValue PerformBUILD_VECTORCombine(SDNode *N, 6484 TargetLowering::DAGCombinerInfo &DCI){ 6485 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 6486 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 6487 // into a pair of GPRs, which is fine when the value is used as a scalar, 6488 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 6489 SelectionDAG &DAG = DCI.DAG; 6490 if (N->getNumOperands() == 2) { 6491 SDValue RV = PerformVMOVDRRCombine(N, DAG); 6492 if (RV.getNode()) 6493 return RV; 6494 } 6495 6496 // Load i64 elements as f64 values so that type legalization does not split 6497 // them up into i32 values. 
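  // Sketch (illustrative types): a v2i64 build_vector of two i64 loads
  // becomes
  //   bitcast-to-v2i64 (v2f64 build_vector (bitcast-to-f64 load0),
  //                                         (bitcast-to-f64 load1))
  // and the per-element bitcasts are then folded into the loads by the
  // DAGCombiner, so each element is loaded straight into a VFP/NEON register.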
6498 EVT VT = N->getValueType(0); 6499 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 6500 return SDValue(); 6501 DebugLoc dl = N->getDebugLoc(); 6502 SmallVector<SDValue, 8> Ops; 6503 unsigned NumElts = VT.getVectorNumElements(); 6504 for (unsigned i = 0; i < NumElts; ++i) { 6505 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 6506 Ops.push_back(V); 6507 // Make the DAGCombiner fold the bitcast. 6508 DCI.AddToWorklist(V.getNode()); 6509 } 6510 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 6511 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 6512 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 6513} 6514 6515/// PerformInsertEltCombine - Target-specific dag combine xforms for 6516/// ISD::INSERT_VECTOR_ELT. 6517static SDValue PerformInsertEltCombine(SDNode *N, 6518 TargetLowering::DAGCombinerInfo &DCI) { 6519 // Bitcast an i64 load inserted into a vector to f64. 6520 // Otherwise, the i64 value will be legalized to a pair of i32 values. 6521 EVT VT = N->getValueType(0); 6522 SDNode *Elt = N->getOperand(1).getNode(); 6523 if (VT.getVectorElementType() != MVT::i64 || 6524 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 6525 return SDValue(); 6526 6527 SelectionDAG &DAG = DCI.DAG; 6528 DebugLoc dl = N->getDebugLoc(); 6529 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6530 VT.getVectorNumElements()); 6531 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 6532 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 6533 // Make the DAGCombiner fold the bitcasts. 6534 DCI.AddToWorklist(Vec.getNode()); 6535 DCI.AddToWorklist(V.getNode()); 6536 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 6537 Vec, V, N->getOperand(2)); 6538 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 6539} 6540 6541/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 6542/// ISD::VECTOR_SHUFFLE. 6543static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 6544 // The LLVM shufflevector instruction does not require the shuffle mask 6545 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 6546 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 6547 // operands do not match the mask length, they are extended by concatenating 6548 // them with undef vectors. That is probably the right thing for other 6549 // targets, but for NEON it is better to concatenate two double-register 6550 // size vector operands into a single quad-register size vector. Do that 6551 // transformation here: 6552 // shuffle(concat(v1, undef), concat(v2, undef)) -> 6553 // shuffle(concat(v1, v2), undef) 6554 SDValue Op0 = N->getOperand(0); 6555 SDValue Op1 = N->getOperand(1); 6556 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 6557 Op1.getOpcode() != ISD::CONCAT_VECTORS || 6558 Op0.getNumOperands() != 2 || 6559 Op1.getNumOperands() != 2) 6560 return SDValue(); 6561 SDValue Concat0Op1 = Op0.getOperand(1); 6562 SDValue Concat1Op1 = Op1.getOperand(1); 6563 if (Concat0Op1.getOpcode() != ISD::UNDEF || 6564 Concat1Op1.getOpcode() != ISD::UNDEF) 6565 return SDValue(); 6566 // Skip the transformation if any of the types are illegal. 
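  // The mask translation further below remaps lane indices from the two
  // padded operands to the single combined concat; e.g. (illustrative, with
  // v2f32 halves) a mask of <0,4,1,5> becomes <0,2,1,3>.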
6567 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6568 EVT VT = N->getValueType(0); 6569 if (!TLI.isTypeLegal(VT) || 6570 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 6571 !TLI.isTypeLegal(Concat1Op1.getValueType())) 6572 return SDValue(); 6573 6574 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 6575 Op0.getOperand(0), Op1.getOperand(0)); 6576 // Translate the shuffle mask. 6577 SmallVector<int, 16> NewMask; 6578 unsigned NumElts = VT.getVectorNumElements(); 6579 unsigned HalfElts = NumElts/2; 6580 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 6581 for (unsigned n = 0; n < NumElts; ++n) { 6582 int MaskElt = SVN->getMaskElt(n); 6583 int NewElt = -1; 6584 if (MaskElt < (int)HalfElts) 6585 NewElt = MaskElt; 6586 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 6587 NewElt = HalfElts + MaskElt - NumElts; 6588 NewMask.push_back(NewElt); 6589 } 6590 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 6591 DAG.getUNDEF(VT), NewMask.data()); 6592} 6593 6594/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 6595/// NEON load/store intrinsics to merge base address updates. 6596static SDValue CombineBaseUpdate(SDNode *N, 6597 TargetLowering::DAGCombinerInfo &DCI) { 6598 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6599 return SDValue(); 6600 6601 SelectionDAG &DAG = DCI.DAG; 6602 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 6603 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 6604 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 6605 SDValue Addr = N->getOperand(AddrOpIdx); 6606 6607 // Search for a use of the address operand that is an increment. 6608 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 6609 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 6610 SDNode *User = *UI; 6611 if (User->getOpcode() != ISD::ADD || 6612 UI.getUse().getResNo() != Addr.getResNo()) 6613 continue; 6614 6615 // Check that the add is independent of the load/store. Otherwise, folding 6616 // it would create a cycle. 6617 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 6618 continue; 6619 6620 // Find the new opcode for the updating load/store. 
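    // Sketch of the overall combine (illustrative): a vld1 whose address is
    // also consumed by 'add Rn, Rn, #16' is replaced by VLD1_UPD, the
    // post-indexed form that returns the updated pointer as an extra result,
    // corresponding to e.g. 'vld1.32 {d16, d17}, [r0]!'.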
6621 bool isLoad = true; 6622 bool isLaneOp = false; 6623 unsigned NewOpc = 0; 6624 unsigned NumVecs = 0; 6625 if (isIntrinsic) { 6626 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 6627 switch (IntNo) { 6628 default: assert(0 && "unexpected intrinsic for Neon base update"); 6629 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 6630 NumVecs = 1; break; 6631 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 6632 NumVecs = 2; break; 6633 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 6634 NumVecs = 3; break; 6635 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 6636 NumVecs = 4; break; 6637 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 6638 NumVecs = 2; isLaneOp = true; break; 6639 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 6640 NumVecs = 3; isLaneOp = true; break; 6641 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 6642 NumVecs = 4; isLaneOp = true; break; 6643 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 6644 NumVecs = 1; isLoad = false; break; 6645 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 6646 NumVecs = 2; isLoad = false; break; 6647 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 6648 NumVecs = 3; isLoad = false; break; 6649 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 6650 NumVecs = 4; isLoad = false; break; 6651 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 6652 NumVecs = 2; isLoad = false; isLaneOp = true; break; 6653 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 6654 NumVecs = 3; isLoad = false; isLaneOp = true; break; 6655 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 6656 NumVecs = 4; isLoad = false; isLaneOp = true; break; 6657 } 6658 } else { 6659 isLaneOp = true; 6660 switch (N->getOpcode()) { 6661 default: assert(0 && "unexpected opcode for Neon base update"); 6662 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 6663 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 6664 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 6665 } 6666 } 6667 6668 // Find the size of memory referenced by the load/store. 6669 EVT VecTy; 6670 if (isLoad) 6671 VecTy = N->getValueType(0); 6672 else 6673 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 6674 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 6675 if (isLaneOp) 6676 NumBytes /= VecTy.getVectorNumElements(); 6677 6678 // If the increment is a constant, it must match the memory ref size. 6679 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 6680 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 6681 uint64_t IncVal = CInc->getZExtValue(); 6682 if (IncVal != NumBytes) 6683 continue; 6684 } else if (NumBytes >= 3 * 16) { 6685 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 6686 // separate instructions that make it harder to use a non-constant update. 6687 continue; 6688 } 6689 6690 // Create the new updating load/store node. 6691 EVT Tys[6]; 6692 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 6693 unsigned n; 6694 for (n = 0; n < NumResultVecs; ++n) 6695 Tys[n] = VecTy; 6696 Tys[n++] = MVT::i32; 6697 Tys[n] = MVT::Other; 6698 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 6699 SmallVector<SDValue, 8> Ops; 6700 Ops.push_back(N->getOperand(0)); // incoming chain 6701 Ops.push_back(N->getOperand(AddrOpIdx)); 6702 Ops.push_back(Inc); 6703 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 6704 Ops.push_back(N->getOperand(i)); 6705 } 6706 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 6707 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 6708 Ops.data(), Ops.size(), 6709 MemInt->getMemoryVT(), 6710 MemInt->getMemOperand()); 6711 6712 // Update the uses. 6713 std::vector<SDValue> NewResults; 6714 for (unsigned i = 0; i < NumResultVecs; ++i) { 6715 NewResults.push_back(SDValue(UpdN.getNode(), i)); 6716 } 6717 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 6718 DCI.CombineTo(N, NewResults); 6719 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 6720 6721 break; 6722 } 6723 return SDValue(); 6724} 6725 6726/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 6727/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 6728/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 6729/// return true. 6730static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 6731 SelectionDAG &DAG = DCI.DAG; 6732 EVT VT = N->getValueType(0); 6733 // vldN-dup instructions only support 64-bit vectors for N > 1. 6734 if (!VT.is64BitVector()) 6735 return false; 6736 6737 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 6738 SDNode *VLD = N->getOperand(0).getNode(); 6739 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 6740 return false; 6741 unsigned NumVecs = 0; 6742 unsigned NewOpc = 0; 6743 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 6744 if (IntNo == Intrinsic::arm_neon_vld2lane) { 6745 NumVecs = 2; 6746 NewOpc = ARMISD::VLD2DUP; 6747 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 6748 NumVecs = 3; 6749 NewOpc = ARMISD::VLD3DUP; 6750 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 6751 NumVecs = 4; 6752 NewOpc = ARMISD::VLD4DUP; 6753 } else { 6754 return false; 6755 } 6756 6757 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 6758 // numbers match the load. 6759 unsigned VLDLaneNo = 6760 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 6761 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6762 UI != UE; ++UI) { 6763 // Ignore uses of the chain result. 6764 if (UI.getUse().getResNo() == NumVecs) 6765 continue; 6766 SDNode *User = *UI; 6767 if (User->getOpcode() != ARMISD::VDUPLANE || 6768 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 6769 return false; 6770 } 6771 6772 // Create the vldN-dup node. 6773 EVT Tys[5]; 6774 unsigned n; 6775 for (n = 0; n < NumVecs; ++n) 6776 Tys[n] = VT; 6777 Tys[n] = MVT::Other; 6778 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 6779 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 6780 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 6781 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 6782 Ops, 2, VLDMemInt->getMemoryVT(), 6783 VLDMemInt->getMemOperand()); 6784 6785 // Update the uses. 
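  // Each VDUPLANE user is redirected to the corresponding result of the new
  // vldN-dup node; no lane index is needed any more because the vldN-dup
  // itself replicates the loaded element across all lanes.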
6786 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 6787 UI != UE; ++UI) { 6788 unsigned ResNo = UI.getUse().getResNo(); 6789 // Ignore uses of the chain result. 6790 if (ResNo == NumVecs) 6791 continue; 6792 SDNode *User = *UI; 6793 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 6794 } 6795 6796 // Now the vldN-lane intrinsic is dead except for its chain result. 6797 // Update uses of the chain. 6798 std::vector<SDValue> VLDDupResults; 6799 for (unsigned n = 0; n < NumVecs; ++n) 6800 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 6801 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 6802 DCI.CombineTo(VLD, VLDDupResults); 6803 6804 return true; 6805} 6806 6807/// PerformVDUPLANECombine - Target-specific dag combine xforms for 6808/// ARMISD::VDUPLANE. 6809static SDValue PerformVDUPLANECombine(SDNode *N, 6810 TargetLowering::DAGCombinerInfo &DCI) { 6811 SDValue Op = N->getOperand(0); 6812 6813 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 6814 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 6815 if (CombineVLDDUP(N, DCI)) 6816 return SDValue(N, 0); 6817 6818 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 6819 // redundant. Ignore bit_converts for now; element sizes are checked below. 6820 while (Op.getOpcode() == ISD::BITCAST) 6821 Op = Op.getOperand(0); 6822 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 6823 return SDValue(); 6824 6825 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 6826 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 6827 // The canonical VMOV for a zero vector uses a 32-bit element size. 6828 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6829 unsigned EltBits; 6830 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 6831 EltSize = 8; 6832 EVT VT = N->getValueType(0); 6833 if (EltSize > VT.getVectorElementType().getSizeInBits()) 6834 return SDValue(); 6835 6836 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 6837} 6838 6839// isConstVecPow2 - Return true if each vector element is a power of 2, all 6840// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 6841static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 6842{ 6843 integerPart cN; 6844 integerPart c0 = 0; 6845 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 6846 I != E; I++) { 6847 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 6848 if (!C) 6849 return false; 6850 6851 bool isExact; 6852 APFloat APF = C->getValueAPF(); 6853 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 6854 != APFloat::opOK || !isExact) 6855 return false; 6856 6857 c0 = (I == 0) ? cN : c0; 6858 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 6859 return false; 6860 } 6861 C = c0; 6862 return true; 6863} 6864 6865/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 6866/// can replace combinations of VMUL and VCVT (floating-point to integer) 6867/// when the VMUL has a constant operand that is a power of 2. 
6868/// 6869/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 6870/// vmul.f32 d16, d17, d16 6871/// vcvt.s32.f32 d16, d16 6872/// becomes: 6873/// vcvt.s32.f32 d16, d16, #3 6874static SDValue PerformVCVTCombine(SDNode *N, 6875 TargetLowering::DAGCombinerInfo &DCI, 6876 const ARMSubtarget *Subtarget) { 6877 SelectionDAG &DAG = DCI.DAG; 6878 SDValue Op = N->getOperand(0); 6879 6880 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 6881 Op.getOpcode() != ISD::FMUL) 6882 return SDValue(); 6883 6884 uint64_t C; 6885 SDValue N0 = Op->getOperand(0); 6886 SDValue ConstVec = Op->getOperand(1); 6887 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 6888 6889 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 6890 !isConstVecPow2(ConstVec, isSigned, C)) 6891 return SDValue(); 6892 6893 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 6894 Intrinsic::arm_neon_vcvtfp2fxu; 6895 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 6896 N->getValueType(0), 6897 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 6898 DAG.getConstant(Log2_64(C), MVT::i32)); 6899} 6900 6901/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 6902/// can replace combinations of VCVT (integer to floating-point) and VDIV 6903/// when the VDIV has a constant operand that is a power of 2. 6904/// 6905/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 6906/// vcvt.f32.s32 d16, d16 6907/// vdiv.f32 d16, d17, d16 6908/// becomes: 6909/// vcvt.f32.s32 d16, d16, #3 6910static SDValue PerformVDIVCombine(SDNode *N, 6911 TargetLowering::DAGCombinerInfo &DCI, 6912 const ARMSubtarget *Subtarget) { 6913 SelectionDAG &DAG = DCI.DAG; 6914 SDValue Op = N->getOperand(0); 6915 unsigned OpOpcode = Op.getNode()->getOpcode(); 6916 6917 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 6918 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 6919 return SDValue(); 6920 6921 uint64_t C; 6922 SDValue ConstVec = N->getOperand(1); 6923 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 6924 6925 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 6926 !isConstVecPow2(ConstVec, isSigned, C)) 6927 return SDValue(); 6928 6929 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 6930 Intrinsic::arm_neon_vcvtfxu2fp; 6931 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 6932 Op.getValueType(), 6933 DAG.getConstant(IntrinsicOpcode, MVT::i32), 6934 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 6935} 6936 6937/// Getvshiftimm - Check if this is a valid build_vector for the immediate 6938/// operand of a vector shift operation, where all the elements of the 6939/// build_vector must have the same constant integer value. 6940static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 6941 // Ignore bit_converts. 6942 while (Op.getOpcode() == ISD::BITCAST) 6943 Op = Op.getOperand(0); 6944 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6945 APInt SplatBits, SplatUndef; 6946 unsigned SplatBitSize; 6947 bool HasAnyUndefs; 6948 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 6949 HasAnyUndefs, ElementBits) || 6950 SplatBitSize > ElementBits) 6951 return false; 6952 Cnt = SplatBits.getSExtValue(); 6953 return true; 6954} 6955 6956/// isVShiftLImm - Check if this is a valid build_vector for the immediate 6957/// operand of a vector shift left operation. 
That value must be in the range: 6958/// 0 <= Value < ElementBits for a left shift; or 6959/// 0 <= Value <= ElementBits for a long left shift. 6960static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 6961 assert(VT.isVector() && "vector shift count is not a vector type"); 6962 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6963 if (! getVShiftImm(Op, ElementBits, Cnt)) 6964 return false; 6965 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 6966} 6967 6968/// isVShiftRImm - Check if this is a valid build_vector for the immediate 6969/// operand of a vector shift right operation. For a shift opcode, the value 6970/// is positive, but for an intrinsic the value count must be negative. The 6971/// absolute value must be in the range: 6972/// 1 <= |Value| <= ElementBits for a right shift; or 6973/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 6974static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 6975 int64_t &Cnt) { 6976 assert(VT.isVector() && "vector shift count is not a vector type"); 6977 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6978 if (! getVShiftImm(Op, ElementBits, Cnt)) 6979 return false; 6980 if (isIntrinsic) 6981 Cnt = -Cnt; 6982 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 6983} 6984 6985/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 6986static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 6987 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 6988 switch (IntNo) { 6989 default: 6990 // Don't do anything for most intrinsics. 6991 break; 6992 6993 // Vector shifts: check for immediate versions and lower them. 6994 // Note: This is done during DAG combining instead of DAG legalizing because 6995 // the build_vectors for 64-bit vector element shift counts are generally 6996 // not legal, and it is hard to see their values after they get legalized to 6997 // loads from a constant pool. 6998 case Intrinsic::arm_neon_vshifts: 6999 case Intrinsic::arm_neon_vshiftu: 7000 case Intrinsic::arm_neon_vshiftls: 7001 case Intrinsic::arm_neon_vshiftlu: 7002 case Intrinsic::arm_neon_vshiftn: 7003 case Intrinsic::arm_neon_vrshifts: 7004 case Intrinsic::arm_neon_vrshiftu: 7005 case Intrinsic::arm_neon_vrshiftn: 7006 case Intrinsic::arm_neon_vqshifts: 7007 case Intrinsic::arm_neon_vqshiftu: 7008 case Intrinsic::arm_neon_vqshiftsu: 7009 case Intrinsic::arm_neon_vqshiftns: 7010 case Intrinsic::arm_neon_vqshiftnu: 7011 case Intrinsic::arm_neon_vqshiftnsu: 7012 case Intrinsic::arm_neon_vqrshiftns: 7013 case Intrinsic::arm_neon_vqrshiftnu: 7014 case Intrinsic::arm_neon_vqrshiftnsu: { 7015 EVT VT = N->getOperand(1).getValueType(); 7016 int64_t Cnt; 7017 unsigned VShiftOpc = 0; 7018 7019 switch (IntNo) { 7020 case Intrinsic::arm_neon_vshifts: 7021 case Intrinsic::arm_neon_vshiftu: 7022 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 7023 VShiftOpc = ARMISD::VSHL; 7024 break; 7025 } 7026 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 7027 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
7028 ARMISD::VSHRs : ARMISD::VSHRu); 7029 break; 7030 } 7031 return SDValue(); 7032 7033 case Intrinsic::arm_neon_vshiftls: 7034 case Intrinsic::arm_neon_vshiftlu: 7035 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 7036 break; 7037 llvm_unreachable("invalid shift count for vshll intrinsic"); 7038 7039 case Intrinsic::arm_neon_vrshifts: 7040 case Intrinsic::arm_neon_vrshiftu: 7041 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 7042 break; 7043 return SDValue(); 7044 7045 case Intrinsic::arm_neon_vqshifts: 7046 case Intrinsic::arm_neon_vqshiftu: 7047 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7048 break; 7049 return SDValue(); 7050 7051 case Intrinsic::arm_neon_vqshiftsu: 7052 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7053 break; 7054 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 7055 7056 case Intrinsic::arm_neon_vshiftn: 7057 case Intrinsic::arm_neon_vrshiftn: 7058 case Intrinsic::arm_neon_vqshiftns: 7059 case Intrinsic::arm_neon_vqshiftnu: 7060 case Intrinsic::arm_neon_vqshiftnsu: 7061 case Intrinsic::arm_neon_vqrshiftns: 7062 case Intrinsic::arm_neon_vqrshiftnu: 7063 case Intrinsic::arm_neon_vqrshiftnsu: 7064 // Narrowing shifts require an immediate right shift. 7065 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 7066 break; 7067 llvm_unreachable("invalid shift count for narrowing vector shift " 7068 "intrinsic"); 7069 7070 default: 7071 llvm_unreachable("unhandled vector shift"); 7072 } 7073 7074 switch (IntNo) { 7075 case Intrinsic::arm_neon_vshifts: 7076 case Intrinsic::arm_neon_vshiftu: 7077 // Opcode already set above. 7078 break; 7079 case Intrinsic::arm_neon_vshiftls: 7080 case Intrinsic::arm_neon_vshiftlu: 7081 if (Cnt == VT.getVectorElementType().getSizeInBits()) 7082 VShiftOpc = ARMISD::VSHLLi; 7083 else 7084 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
7085 ARMISD::VSHLLs : ARMISD::VSHLLu); 7086 break; 7087 case Intrinsic::arm_neon_vshiftn: 7088 VShiftOpc = ARMISD::VSHRN; break; 7089 case Intrinsic::arm_neon_vrshifts: 7090 VShiftOpc = ARMISD::VRSHRs; break; 7091 case Intrinsic::arm_neon_vrshiftu: 7092 VShiftOpc = ARMISD::VRSHRu; break; 7093 case Intrinsic::arm_neon_vrshiftn: 7094 VShiftOpc = ARMISD::VRSHRN; break; 7095 case Intrinsic::arm_neon_vqshifts: 7096 VShiftOpc = ARMISD::VQSHLs; break; 7097 case Intrinsic::arm_neon_vqshiftu: 7098 VShiftOpc = ARMISD::VQSHLu; break; 7099 case Intrinsic::arm_neon_vqshiftsu: 7100 VShiftOpc = ARMISD::VQSHLsu; break; 7101 case Intrinsic::arm_neon_vqshiftns: 7102 VShiftOpc = ARMISD::VQSHRNs; break; 7103 case Intrinsic::arm_neon_vqshiftnu: 7104 VShiftOpc = ARMISD::VQSHRNu; break; 7105 case Intrinsic::arm_neon_vqshiftnsu: 7106 VShiftOpc = ARMISD::VQSHRNsu; break; 7107 case Intrinsic::arm_neon_vqrshiftns: 7108 VShiftOpc = ARMISD::VQRSHRNs; break; 7109 case Intrinsic::arm_neon_vqrshiftnu: 7110 VShiftOpc = ARMISD::VQRSHRNu; break; 7111 case Intrinsic::arm_neon_vqrshiftnsu: 7112 VShiftOpc = ARMISD::VQRSHRNsu; break; 7113 } 7114 7115 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7116 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 7117 } 7118 7119 case Intrinsic::arm_neon_vshiftins: { 7120 EVT VT = N->getOperand(1).getValueType(); 7121 int64_t Cnt; 7122 unsigned VShiftOpc = 0; 7123 7124 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 7125 VShiftOpc = ARMISD::VSLI; 7126 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 7127 VShiftOpc = ARMISD::VSRI; 7128 else { 7129 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 7130 } 7131 7132 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7133 N->getOperand(1), N->getOperand(2), 7134 DAG.getConstant(Cnt, MVT::i32)); 7135 } 7136 7137 case Intrinsic::arm_neon_vqrshifts: 7138 case Intrinsic::arm_neon_vqrshiftu: 7139 // No immediate versions of these to check for. 7140 break; 7141 } 7142 7143 return SDValue(); 7144} 7145 7146/// PerformShiftCombine - Checks for immediate versions of vector shifts and 7147/// lowers them. As with the vector shift intrinsics, this is done during DAG 7148/// combining instead of DAG legalizing because the build_vectors for 64-bit 7149/// vector element shift counts are generally not legal, and it is hard to see 7150/// their values after they get legalized to loads from a constant pool. 7151static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 7152 const ARMSubtarget *ST) { 7153 EVT VT = N->getValueType(0); 7154 7155 // Nothing to be done for scalar shifts. 7156 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7157 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 7158 return SDValue(); 7159 7160 assert(ST->hasNEON() && "unexpected vector shift"); 7161 int64_t Cnt; 7162 7163 switch (N->getOpcode()) { 7164 default: llvm_unreachable("unexpected shift opcode"); 7165 7166 case ISD::SHL: 7167 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 7168 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 7169 DAG.getConstant(Cnt, MVT::i32)); 7170 break; 7171 7172 case ISD::SRA: 7173 case ISD::SRL: 7174 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 7175 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
7176 ARMISD::VSHRs : ARMISD::VSHRu); 7177 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 7178 DAG.getConstant(Cnt, MVT::i32)); 7179 } 7180 } 7181 return SDValue(); 7182} 7183 7184/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 7185/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 7186static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 7187 const ARMSubtarget *ST) { 7188 SDValue N0 = N->getOperand(0); 7189 7190 // Check for sign- and zero-extensions of vector extract operations of 8- 7191 // and 16-bit vector elements. NEON supports these directly. They are 7192 // handled during DAG combining because type legalization will promote them 7193 // to 32-bit types and it is messy to recognize the operations after that. 7194 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7195 SDValue Vec = N0.getOperand(0); 7196 SDValue Lane = N0.getOperand(1); 7197 EVT VT = N->getValueType(0); 7198 EVT EltVT = N0.getValueType(); 7199 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7200 7201 if (VT == MVT::i32 && 7202 (EltVT == MVT::i8 || EltVT == MVT::i16) && 7203 TLI.isTypeLegal(Vec.getValueType()) && 7204 isa<ConstantSDNode>(Lane)) { 7205 7206 unsigned Opc = 0; 7207 switch (N->getOpcode()) { 7208 default: llvm_unreachable("unexpected opcode"); 7209 case ISD::SIGN_EXTEND: 7210 Opc = ARMISD::VGETLANEs; 7211 break; 7212 case ISD::ZERO_EXTEND: 7213 case ISD::ANY_EXTEND: 7214 Opc = ARMISD::VGETLANEu; 7215 break; 7216 } 7217 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 7218 } 7219 } 7220 7221 return SDValue(); 7222} 7223 7224/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 7225/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 7226static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 7227 const ARMSubtarget *ST) { 7228 // If the target supports NEON, try to use vmax/vmin instructions for f32 7229 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 7230 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 7231 // a NaN; only do the transformation when it matches that behavior. 7232 7233 // For now only do this when using NEON for FP operations; if using VFP, it 7234 // is not obvious that the benefit outweighs the cost of switching to the 7235 // NEON pipeline. 7236 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 7237 N->getValueType(0) != MVT::f32) 7238 return SDValue(); 7239 7240 SDValue CondLHS = N->getOperand(0); 7241 SDValue CondRHS = N->getOperand(1); 7242 SDValue LHS = N->getOperand(2); 7243 SDValue RHS = N->getOperand(3); 7244 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 7245 7246 unsigned Opcode = 0; 7247 bool IsReversed; 7248 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 7249 IsReversed = false; // x CC y ? x : y 7250 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 7251 IsReversed = true ; // x CC y ? y : x 7252 } else { 7253 return SDValue(); 7254 } 7255 7256 bool IsUnordered; 7257 switch (CC) { 7258 default: break; 7259 case ISD::SETOLT: 7260 case ISD::SETOLE: 7261 case ISD::SETLT: 7262 case ISD::SETLE: 7263 case ISD::SETULT: 7264 case ISD::SETULE: 7265 // If LHS is NaN, an ordered comparison will be false and the result will 7266 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 7267 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
7268 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 7269 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7270 break; 7271 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 7272 // will return -0, so vmin can only be used for unsafe math or if one of 7273 // the operands is known to be nonzero. 7274 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 7275 !UnsafeFPMath && 7276 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7277 break; 7278 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 7279 break; 7280 7281 case ISD::SETOGT: 7282 case ISD::SETOGE: 7283 case ISD::SETGT: 7284 case ISD::SETGE: 7285 case ISD::SETUGT: 7286 case ISD::SETUGE: 7287 // If LHS is NaN, an ordered comparison will be false and the result will 7288 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 7289 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 7290 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 7291 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7292 break; 7293 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 7294 // will return +0, so vmax can only be used for unsafe math or if one of 7295 // the operands is known to be nonzero. 7296 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 7297 !UnsafeFPMath && 7298 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7299 break; 7300 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 7301 break; 7302 } 7303 7304 if (!Opcode) 7305 return SDValue(); 7306 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 7307} 7308 7309/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 7310SDValue 7311ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 7312 SDValue Cmp = N->getOperand(4); 7313 if (Cmp.getOpcode() != ARMISD::CMPZ) 7314 // Only looking at EQ and NE cases. 7315 return SDValue(); 7316 7317 EVT VT = N->getValueType(0); 7318 DebugLoc dl = N->getDebugLoc(); 7319 SDValue LHS = Cmp.getOperand(0); 7320 SDValue RHS = Cmp.getOperand(1); 7321 SDValue FalseVal = N->getOperand(0); 7322 SDValue TrueVal = N->getOperand(1); 7323 SDValue ARMcc = N->getOperand(2); 7324 ARMCC::CondCodes CC = 7325 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 7326 7327 // Simplify 7328 // mov r1, r0 7329 // cmp r1, x 7330 // mov r0, y 7331 // moveq r0, x 7332 // to 7333 // cmp r0, x 7334 // movne r0, y 7335 // 7336 // mov r1, r0 7337 // cmp r1, x 7338 // mov r0, x 7339 // movne r0, y 7340 // to 7341 // cmp r0, x 7342 // movne r0, y 7343 /// FIXME: Turn this into a target neutral optimization? 7344 SDValue Res; 7345 if (CC == ARMCC::NE && FalseVal == RHS) { 7346 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 7347 N->getOperand(3), Cmp); 7348 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 7349 SDValue ARMcc; 7350 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 7351 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 7352 N->getOperand(3), NewCmp); 7353 } 7354 7355 if (Res.getNode()) { 7356 APInt KnownZero, KnownOne; 7357 APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()); 7358 DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne); 7359 // Capture demanded bits information that would be otherwise lost. 
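    // For example (illustrative): if every bit except bit 0 is known zero,
    // the result is re-asserted as a zero-extended i1 via AssertZext so later
    // combines can still rely on that fact after the CMOV is rewritten.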
7360 if (KnownZero == 0xfffffffe) 7361 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7362 DAG.getValueType(MVT::i1)); 7363 else if (KnownZero == 0xffffff00) 7364 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7365 DAG.getValueType(MVT::i8)); 7366 else if (KnownZero == 0xffff0000) 7367 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7368 DAG.getValueType(MVT::i16)); 7369 } 7370 7371 return Res; 7372} 7373 7374SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 7375 DAGCombinerInfo &DCI) const { 7376 switch (N->getOpcode()) { 7377 default: break; 7378 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 7379 case ISD::SUB: return PerformSUBCombine(N, DCI); 7380 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 7381 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 7382 case ISD::AND: return PerformANDCombine(N, DCI); 7383 case ARMISD::BFI: return PerformBFICombine(N, DCI); 7384 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 7385 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 7386 case ISD::STORE: return PerformSTORECombine(N, DCI); 7387 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 7388 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 7389 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 7390 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 7391 case ISD::FP_TO_SINT: 7392 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 7393 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 7394 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 7395 case ISD::SHL: 7396 case ISD::SRA: 7397 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 7398 case ISD::SIGN_EXTEND: 7399 case ISD::ZERO_EXTEND: 7400 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 7401 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 7402 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 7403 case ARMISD::VLD2DUP: 7404 case ARMISD::VLD3DUP: 7405 case ARMISD::VLD4DUP: 7406 return CombineBaseUpdate(N, DCI); 7407 case ISD::INTRINSIC_VOID: 7408 case ISD::INTRINSIC_W_CHAIN: 7409 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 7410 case Intrinsic::arm_neon_vld1: 7411 case Intrinsic::arm_neon_vld2: 7412 case Intrinsic::arm_neon_vld3: 7413 case Intrinsic::arm_neon_vld4: 7414 case Intrinsic::arm_neon_vld2lane: 7415 case Intrinsic::arm_neon_vld3lane: 7416 case Intrinsic::arm_neon_vld4lane: 7417 case Intrinsic::arm_neon_vst1: 7418 case Intrinsic::arm_neon_vst2: 7419 case Intrinsic::arm_neon_vst3: 7420 case Intrinsic::arm_neon_vst4: 7421 case Intrinsic::arm_neon_vst2lane: 7422 case Intrinsic::arm_neon_vst3lane: 7423 case Intrinsic::arm_neon_vst4lane: 7424 return CombineBaseUpdate(N, DCI); 7425 default: break; 7426 } 7427 break; 7428 } 7429 return SDValue(); 7430} 7431 7432bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 7433 EVT VT) const { 7434 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 7435} 7436 7437bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 7438 if (!Subtarget->allowsUnalignedMem()) 7439 return false; 7440 7441 switch (VT.getSimpleVT().SimpleTy) { 7442 default: 7443 return false; 7444 case MVT::i8: 7445 case MVT::i16: 7446 case MVT::i32: 7447 return true; 7448 // FIXME: VLD1 etc with standard alignment is legal. 
7449 } 7450} 7451 7452static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 7453 if (V < 0) 7454 return false; 7455 7456 unsigned Scale = 1; 7457 switch (VT.getSimpleVT().SimpleTy) { 7458 default: return false; 7459 case MVT::i1: 7460 case MVT::i8: 7461 // Scale == 1; 7462 break; 7463 case MVT::i16: 7464 // Scale == 2; 7465 Scale = 2; 7466 break; 7467 case MVT::i32: 7468 // Scale == 4; 7469 Scale = 4; 7470 break; 7471 } 7472 7473 if ((V & (Scale - 1)) != 0) 7474 return false; 7475 V /= Scale; 7476 return V == (V & ((1LL << 5) - 1)); 7477} 7478 7479static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 7480 const ARMSubtarget *Subtarget) { 7481 bool isNeg = false; 7482 if (V < 0) { 7483 isNeg = true; 7484 V = - V; 7485 } 7486 7487 switch (VT.getSimpleVT().SimpleTy) { 7488 default: return false; 7489 case MVT::i1: 7490 case MVT::i8: 7491 case MVT::i16: 7492 case MVT::i32: 7493 // + imm12 or - imm8 7494 if (isNeg) 7495 return V == (V & ((1LL << 8) - 1)); 7496 return V == (V & ((1LL << 12) - 1)); 7497 case MVT::f32: 7498 case MVT::f64: 7499 // Same as ARM mode. FIXME: NEON? 7500 if (!Subtarget->hasVFP2()) 7501 return false; 7502 if ((V & 3) != 0) 7503 return false; 7504 V >>= 2; 7505 return V == (V & ((1LL << 8) - 1)); 7506 } 7507} 7508 7509/// isLegalAddressImmediate - Return true if the integer value can be used 7510/// as the offset of the target addressing mode for load / store of the 7511/// given type. 7512static bool isLegalAddressImmediate(int64_t V, EVT VT, 7513 const ARMSubtarget *Subtarget) { 7514 if (V == 0) 7515 return true; 7516 7517 if (!VT.isSimple()) 7518 return false; 7519 7520 if (Subtarget->isThumb1Only()) 7521 return isLegalT1AddressImmediate(V, VT); 7522 else if (Subtarget->isThumb2()) 7523 return isLegalT2AddressImmediate(V, VT, Subtarget); 7524 7525 // ARM mode. 7526 if (V < 0) 7527 V = - V; 7528 switch (VT.getSimpleVT().SimpleTy) { 7529 default: return false; 7530 case MVT::i1: 7531 case MVT::i8: 7532 case MVT::i32: 7533 // +- imm12 7534 return V == (V & ((1LL << 12) - 1)); 7535 case MVT::i16: 7536 // +- imm8 7537 return V == (V & ((1LL << 8) - 1)); 7538 case MVT::f32: 7539 case MVT::f64: 7540 if (!Subtarget->hasVFP2()) // FIXME: NEON? 7541 return false; 7542 if ((V & 3) != 0) 7543 return false; 7544 V >>= 2; 7545 return V == (V & ((1LL << 8) - 1)); 7546 } 7547} 7548 7549bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 7550 EVT VT) const { 7551 int Scale = AM.Scale; 7552 if (Scale < 0) 7553 return false; 7554 7555 switch (VT.getSimpleVT().SimpleTy) { 7556 default: return false; 7557 case MVT::i1: 7558 case MVT::i8: 7559 case MVT::i16: 7560 case MVT::i32: 7561 if (Scale == 1) 7562 return true; 7563 // r + r << imm 7564 Scale = Scale & ~1; 7565 return Scale == 2 || Scale == 4 || Scale == 8; 7566 case MVT::i64: 7567 // r + r 7568 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 7569 return true; 7570 return false; 7571 case MVT::isVoid: 7572 // Note, we allow "void" uses (basically, uses that aren't loads or 7573 // stores), because arm allows folding a scale into many arithmetic 7574 // operations. This should be made more precise and revisited later. 7575 7576 // Allow r << imm, but the imm has to be a multiple of two. 7577 if (Scale & 1) return false; 7578 return isPowerOf2_32(Scale); 7579 } 7580} 7581 7582/// isLegalAddressingMode - Return true if the addressing mode represented 7583/// by AM is legal for this target, for a load/store of the specified type. 
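/// For example (illustration): in ARM mode an i32 load with
/// AM.BaseOffs = 4095 and no scale ("[r0, #4095]") is legal (imm12), and
/// AM.Scale = 4 with no offset ("[r0, r1, lsl #2]") is legal, but a scaled
/// register combined with a nonzero BaseOffs is rejected below because ARM
/// has no r + r*scale + imm addressing mode.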
7584bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 7585 Type *Ty) const { 7586 EVT VT = getValueType(Ty, true); 7587 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 7588 return false; 7589 7590 // Can never fold addr of global into load/store. 7591 if (AM.BaseGV) 7592 return false; 7593 7594 switch (AM.Scale) { 7595 case 0: // no scale reg, must be "r+i" or "r", or "i". 7596 break; 7597 case 1: 7598 if (Subtarget->isThumb1Only()) 7599 return false; 7600 // FALL THROUGH. 7601 default: 7602 // ARM doesn't support any R+R*scale+imm addr modes. 7603 if (AM.BaseOffs) 7604 return false; 7605 7606 if (!VT.isSimple()) 7607 return false; 7608 7609 if (Subtarget->isThumb2()) 7610 return isLegalT2ScaledAddressingMode(AM, VT); 7611 7612 int Scale = AM.Scale; 7613 switch (VT.getSimpleVT().SimpleTy) { 7614 default: return false; 7615 case MVT::i1: 7616 case MVT::i8: 7617 case MVT::i32: 7618 if (Scale < 0) Scale = -Scale; 7619 if (Scale == 1) 7620 return true; 7621 // r + r << imm 7622 return isPowerOf2_32(Scale & ~1); 7623 case MVT::i16: 7624 case MVT::i64: 7625 // r + r 7626 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 7627 return true; 7628 return false; 7629 7630 case MVT::isVoid: 7631 // Note, we allow "void" uses (basically, uses that aren't loads or 7632 // stores), because arm allows folding a scale into many arithmetic 7633 // operations. This should be made more precise and revisited later. 7634 7635 // Allow r << imm, but the imm has to be a multiple of two. 7636 if (Scale & 1) return false; 7637 return isPowerOf2_32(Scale); 7638 } 7639 break; 7640 } 7641 return true; 7642} 7643 7644/// isLegalICmpImmediate - Return true if the specified immediate is legal 7645/// icmp immediate, that is the target has icmp instructions which can compare 7646/// a register against the immediate without having to materialize the 7647/// immediate into a register. 7648bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 7649 if (!Subtarget->isThumb()) 7650 return ARM_AM::getSOImmVal(Imm) != -1; 7651 if (Subtarget->isThumb2()) 7652 return ARM_AM::getT2SOImmVal(Imm) != -1; 7653 return Imm >= 0 && Imm <= 255; 7654} 7655 7656/// isLegalAddImmediate - Return true if the specified immediate is legal 7657/// add immediate, that is the target has add instructions which can add 7658/// a register with the immediate without having to materialize the 7659/// immediate into a register. 
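/// For example (illustration): 0xFF00 is a valid ARM-mode so_imm (0xFF
/// rotated right by 24 bits) and is accepted here, while 0x101 cannot be
/// encoded as an 8-bit value with an even rotation and is rejected, so that
/// constant would have to be materialized into a register first.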
7660bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 7661 return ARM_AM::getSOImmVal(Imm) != -1; 7662} 7663 7664static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 7665 bool isSEXTLoad, SDValue &Base, 7666 SDValue &Offset, bool &isInc, 7667 SelectionDAG &DAG) { 7668 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 7669 return false; 7670 7671 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 7672 // AddressingMode 3 7673 Base = Ptr->getOperand(0); 7674 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7675 int RHSC = (int)RHS->getZExtValue(); 7676 if (RHSC < 0 && RHSC > -256) { 7677 assert(Ptr->getOpcode() == ISD::ADD); 7678 isInc = false; 7679 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7680 return true; 7681 } 7682 } 7683 isInc = (Ptr->getOpcode() == ISD::ADD); 7684 Offset = Ptr->getOperand(1); 7685 return true; 7686 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 7687 // AddressingMode 2 7688 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7689 int RHSC = (int)RHS->getZExtValue(); 7690 if (RHSC < 0 && RHSC > -0x1000) { 7691 assert(Ptr->getOpcode() == ISD::ADD); 7692 isInc = false; 7693 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7694 Base = Ptr->getOperand(0); 7695 return true; 7696 } 7697 } 7698 7699 if (Ptr->getOpcode() == ISD::ADD) { 7700 isInc = true; 7701 ARM_AM::ShiftOpc ShOpcVal= 7702 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 7703 if (ShOpcVal != ARM_AM::no_shift) { 7704 Base = Ptr->getOperand(1); 7705 Offset = Ptr->getOperand(0); 7706 } else { 7707 Base = Ptr->getOperand(0); 7708 Offset = Ptr->getOperand(1); 7709 } 7710 return true; 7711 } 7712 7713 isInc = (Ptr->getOpcode() == ISD::ADD); 7714 Base = Ptr->getOperand(0); 7715 Offset = Ptr->getOperand(1); 7716 return true; 7717 } 7718 7719 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 7720 return false; 7721} 7722 7723static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 7724 bool isSEXTLoad, SDValue &Base, 7725 SDValue &Offset, bool &isInc, 7726 SelectionDAG &DAG) { 7727 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 7728 return false; 7729 7730 Base = Ptr->getOperand(0); 7731 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 7732 int RHSC = (int)RHS->getZExtValue(); 7733 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 7734 assert(Ptr->getOpcode() == ISD::ADD); 7735 isInc = false; 7736 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 7737 return true; 7738 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 7739 isInc = Ptr->getOpcode() == ISD::ADD; 7740 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 7741 return true; 7742 } 7743 } 7744 7745 return false; 7746} 7747 7748/// getPreIndexedAddressParts - returns true by value, base pointer and 7749/// offset pointer and addressing mode by reference if the node's address 7750/// can be legally represented as pre-indexed load / store address. 
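/// For example (illustration): an i32 load whose address is computed as
/// "add r1, #4" can be selected as the pre-indexed "ldr r0, [r1, #4]!", and
/// a "sub r1, #4" address as "ldr r0, [r1, #-4]!".  Thumb1 has no
/// pre-indexed forms, hence the early return below.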
7751bool 7752ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 7753 SDValue &Offset, 7754 ISD::MemIndexedMode &AM, 7755 SelectionDAG &DAG) const { 7756 if (Subtarget->isThumb1Only()) 7757 return false; 7758 7759 EVT VT; 7760 SDValue Ptr; 7761 bool isSEXTLoad = false; 7762 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7763 Ptr = LD->getBasePtr(); 7764 VT = LD->getMemoryVT(); 7765 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7766 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7767 Ptr = ST->getBasePtr(); 7768 VT = ST->getMemoryVT(); 7769 } else 7770 return false; 7771 7772 bool isInc; 7773 bool isLegal = false; 7774 if (Subtarget->isThumb2()) 7775 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7776 Offset, isInc, DAG); 7777 else 7778 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 7779 Offset, isInc, DAG); 7780 if (!isLegal) 7781 return false; 7782 7783 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 7784 return true; 7785} 7786 7787/// getPostIndexedAddressParts - returns true by value, base pointer and 7788/// offset pointer and addressing mode by reference if this node can be 7789/// combined with a load / store to form a post-indexed load / store. 7790bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 7791 SDValue &Base, 7792 SDValue &Offset, 7793 ISD::MemIndexedMode &AM, 7794 SelectionDAG &DAG) const { 7795 if (Subtarget->isThumb1Only()) 7796 return false; 7797 7798 EVT VT; 7799 SDValue Ptr; 7800 bool isSEXTLoad = false; 7801 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 7802 VT = LD->getMemoryVT(); 7803 Ptr = LD->getBasePtr(); 7804 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 7805 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 7806 VT = ST->getMemoryVT(); 7807 Ptr = ST->getBasePtr(); 7808 } else 7809 return false; 7810 7811 bool isInc; 7812 bool isLegal = false; 7813 if (Subtarget->isThumb2()) 7814 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7815 isInc, DAG); 7816 else 7817 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 7818 isInc, DAG); 7819 if (!isLegal) 7820 return false; 7821 7822 if (Ptr != Base) { 7823 // Swap base ptr and offset to catch more post-index load / store when 7824 // it's legal. In Thumb2 mode, offset must be an immediate. 7825 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 7826 !Subtarget->isThumb2()) 7827 std::swap(Base, Offset); 7828 7829 // Post-indexed load / store update the base pointer. 7830 if (Ptr != Base) 7831 return false; 7832 } 7833 7834 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 7835 return true; 7836} 7837 7838void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 7839 const APInt &Mask, 7840 APInt &KnownZero, 7841 APInt &KnownOne, 7842 const SelectionDAG &DAG, 7843 unsigned Depth) const { 7844 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 7845 switch (Op.getOpcode()) { 7846 default: break; 7847 case ARMISD::CMOV: { 7848 // Bits are known zero/one if known on the LHS and RHS. 
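    // A select can only guarantee a bit when both possible results agree on
    // it.  Worked example (illustration): for a CMOV between the constants
    // 0xF0 and 0x0F, each operand alone has 28 known-zero bits, but only the
    // top 24 bits are zero in both, so the intersection computed below
    // reports KnownZero = 0xFFFFFF00 and KnownOne = 0.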
7849 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 7850 if (KnownZero == 0 && KnownOne == 0) return; 7851 7852 APInt KnownZeroRHS, KnownOneRHS; 7853 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 7854 KnownZeroRHS, KnownOneRHS, Depth+1); 7855 KnownZero &= KnownZeroRHS; 7856 KnownOne &= KnownOneRHS; 7857 return; 7858 } 7859 } 7860} 7861 7862//===----------------------------------------------------------------------===// 7863// ARM Inline Assembly Support 7864//===----------------------------------------------------------------------===// 7865 7866bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 7867 // Looking for "rev" which is V6+. 7868 if (!Subtarget->hasV6Ops()) 7869 return false; 7870 7871 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 7872 std::string AsmStr = IA->getAsmString(); 7873 SmallVector<StringRef, 4> AsmPieces; 7874 SplitString(AsmStr, AsmPieces, ";\n"); 7875 7876 switch (AsmPieces.size()) { 7877 default: return false; 7878 case 1: 7879 AsmStr = AsmPieces[0]; 7880 AsmPieces.clear(); 7881 SplitString(AsmStr, AsmPieces, " \t,"); 7882 7883 // rev $0, $1 7884 if (AsmPieces.size() == 3 && 7885 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 7886 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 7887 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 7888 if (Ty && Ty->getBitWidth() == 32) 7889 return IntrinsicLowering::LowerToByteSwap(CI); 7890 } 7891 break; 7892 } 7893 7894 return false; 7895} 7896 7897/// getConstraintType - Given a constraint letter, return the type of 7898/// constraint it is for this target. 7899ARMTargetLowering::ConstraintType 7900ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 7901 if (Constraint.size() == 1) { 7902 switch (Constraint[0]) { 7903 default: break; 7904 case 'l': return C_RegisterClass; 7905 case 'w': return C_RegisterClass; 7906 case 'h': return C_RegisterClass; 7907 case 'x': return C_RegisterClass; 7908 case 't': return C_RegisterClass; 7909 case 'j': return C_Other; // Constant for movw. 7910 // An address with a single base register. Due to the way we 7911 // currently handle addresses it is the same as an 'r' memory constraint. 7912 case 'Q': return C_Memory; 7913 } 7914 } else if (Constraint.size() == 2) { 7915 switch (Constraint[0]) { 7916 default: break; 7917 // All 'U+' constraints are addresses. 7918 case 'U': return C_Memory; 7919 } 7920 } 7921 return TargetLowering::getConstraintType(Constraint); 7922} 7923 7924/// Examine constraint type and operand type and determine a weight value. 7925/// This object must already have been set up with the operand type 7926/// and the current alternative constraint selected. 7927TargetLowering::ConstraintWeight 7928ARMTargetLowering::getSingleConstraintMatchWeight( 7929 AsmOperandInfo &info, const char *constraint) const { 7930 ConstraintWeight weight = CW_Invalid; 7931 Value *CallOperandVal = info.CallOperandVal; 7932 // If we don't have a value, we can't do a match, 7933 // but allow it at the lowest weight. 7934 if (CallOperandVal == NULL) 7935 return CW_Default; 7936 Type *type = CallOperandVal->getType(); 7937 // Look at the constraint type. 
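  // For example (illustration): an integer operand tied to the 'l'
  // constraint on a Thumb target scores CW_SpecificReg (low registers only),
  // while the same operand on an ARM-mode target gets the generic
  // CW_Register weight, per the cases below.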
7938 switch (*constraint) { 7939 default: 7940 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 7941 break; 7942 case 'l': 7943 if (type->isIntegerTy()) { 7944 if (Subtarget->isThumb()) 7945 weight = CW_SpecificReg; 7946 else 7947 weight = CW_Register; 7948 } 7949 break; 7950 case 'w': 7951 if (type->isFloatingPointTy()) 7952 weight = CW_Register; 7953 break; 7954 } 7955 return weight; 7956} 7957 7958typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 7959RCPair 7960ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 7961 EVT VT) const { 7962 if (Constraint.size() == 1) { 7963 // GCC ARM Constraint Letters 7964 switch (Constraint[0]) { 7965 case 'l': // Low regs or general regs. 7966 if (Subtarget->isThumb()) 7967 return RCPair(0U, ARM::tGPRRegisterClass); 7968 else 7969 return RCPair(0U, ARM::GPRRegisterClass); 7970 case 'h': // High regs or no regs. 7971 if (Subtarget->isThumb()) 7972 return RCPair(0U, ARM::hGPRRegisterClass); 7973 break; 7974 case 'r': 7975 return RCPair(0U, ARM::GPRRegisterClass); 7976 case 'w': 7977 if (VT == MVT::f32) 7978 return RCPair(0U, ARM::SPRRegisterClass); 7979 if (VT.getSizeInBits() == 64) 7980 return RCPair(0U, ARM::DPRRegisterClass); 7981 if (VT.getSizeInBits() == 128) 7982 return RCPair(0U, ARM::QPRRegisterClass); 7983 break; 7984 case 'x': 7985 if (VT == MVT::f32) 7986 return RCPair(0U, ARM::SPR_8RegisterClass); 7987 if (VT.getSizeInBits() == 64) 7988 return RCPair(0U, ARM::DPR_8RegisterClass); 7989 if (VT.getSizeInBits() == 128) 7990 return RCPair(0U, ARM::QPR_8RegisterClass); 7991 break; 7992 case 't': 7993 if (VT == MVT::f32) 7994 return RCPair(0U, ARM::SPRRegisterClass); 7995 break; 7996 } 7997 } 7998 if (StringRef("{cc}").equals_lower(Constraint)) 7999 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 8000 8001 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8002} 8003 8004/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8005/// vector. If it is invalid, don't add anything to Ops. 8006void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8007 std::string &Constraint, 8008 std::vector<SDValue>&Ops, 8009 SelectionDAG &DAG) const { 8010 SDValue Result(0, 0); 8011 8012 // Currently only support length 1 constraints. 8013 if (Constraint.length() != 1) return; 8014 8015 char ConstraintLetter = Constraint[0]; 8016 switch (ConstraintLetter) { 8017 default: break; 8018 case 'j': 8019 case 'I': case 'J': case 'K': case 'L': 8020 case 'M': case 'N': case 'O': 8021 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 8022 if (!C) 8023 return; 8024 8025 int64_t CVal64 = C->getSExtValue(); 8026 int CVal = (int) CVal64; 8027 // None of these constraints allow values larger than 32 bits. Check 8028 // that the value fits in an int. 8029 if (CVal != CVal64) 8030 return; 8031 8032 switch (ConstraintLetter) { 8033 case 'j': 8034 // Constant suitable for movw, must be between 0 and 8035 // 65535. 8036 if (Subtarget->hasV6T2Ops()) 8037 if (CVal >= 0 && CVal <= 65535) 8038 break; 8039 return; 8040 case 'I': 8041 if (Subtarget->isThumb1Only()) { 8042 // This must be a constant between 0 and 255, for ADD 8043 // immediates. 8044 if (CVal >= 0 && CVal <= 255) 8045 break; 8046 } else if (Subtarget->isThumb2()) { 8047 // A constant that can be used as an immediate value in a 8048 // data-processing instruction. 
8049 if (ARM_AM::getT2SOImmVal(CVal) != -1) 8050 break; 8051 } else { 8052 // A constant that can be used as an immediate value in a 8053 // data-processing instruction. 8054 if (ARM_AM::getSOImmVal(CVal) != -1) 8055 break; 8056 } 8057 return; 8058 8059 case 'J': 8060 if (Subtarget->isThumb()) { // FIXME thumb2 8061 // This must be a constant between -255 and -1, for negated ADD 8062 // immediates. This can be used in GCC with an "n" modifier that 8063 // prints the negated value, for use with SUB instructions. It is 8064 // not useful otherwise but is implemented for compatibility. 8065 if (CVal >= -255 && CVal <= -1) 8066 break; 8067 } else { 8068 // This must be a constant between -4095 and 4095. It is not clear 8069 // what this constraint is intended for. Implemented for 8070 // compatibility with GCC. 8071 if (CVal >= -4095 && CVal <= 4095) 8072 break; 8073 } 8074 return; 8075 8076 case 'K': 8077 if (Subtarget->isThumb1Only()) { 8078 // A 32-bit value where only one byte has a nonzero value. Exclude 8079 // zero to match GCC. This constraint is used by GCC internally for 8080 // constants that can be loaded with a move/shift combination. 8081 // It is not useful otherwise but is implemented for compatibility. 8082 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 8083 break; 8084 } else if (Subtarget->isThumb2()) { 8085 // A constant whose bitwise inverse can be used as an immediate 8086 // value in a data-processing instruction. This can be used in GCC 8087 // with a "B" modifier that prints the inverted value, for use with 8088 // BIC and MVN instructions. It is not useful otherwise but is 8089 // implemented for compatibility. 8090 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 8091 break; 8092 } else { 8093 // A constant whose bitwise inverse can be used as an immediate 8094 // value in a data-processing instruction. This can be used in GCC 8095 // with a "B" modifier that prints the inverted value, for use with 8096 // BIC and MVN instructions. It is not useful otherwise but is 8097 // implemented for compatibility. 8098 if (ARM_AM::getSOImmVal(~CVal) != -1) 8099 break; 8100 } 8101 return; 8102 8103 case 'L': 8104 if (Subtarget->isThumb1Only()) { 8105 // This must be a constant between -7 and 7, 8106 // for 3-operand ADD/SUB immediate instructions. 8107 if (CVal >= -7 && CVal < 7) 8108 break; 8109 } else if (Subtarget->isThumb2()) { 8110 // A constant whose negation can be used as an immediate value in a 8111 // data-processing instruction. This can be used in GCC with an "n" 8112 // modifier that prints the negated value, for use with SUB 8113 // instructions. It is not useful otherwise but is implemented for 8114 // compatibility. 8115 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 8116 break; 8117 } else { 8118 // A constant whose negation can be used as an immediate value in a 8119 // data-processing instruction. This can be used in GCC with an "n" 8120 // modifier that prints the negated value, for use with SUB 8121 // instructions. It is not useful otherwise but is implemented for 8122 // compatibility. 8123 if (ARM_AM::getSOImmVal(-CVal) != -1) 8124 break; 8125 } 8126 return; 8127 8128 case 'M': 8129 if (Subtarget->isThumb()) { // FIXME thumb2 8130 // This must be a multiple of 4 between 0 and 1020, for 8131 // ADD sp + immediate. 8132 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 8133 break; 8134 } else { 8135 // A power of two or a constant between 0 and 32. 
This is used in 8136 // GCC for the shift amount on shifted register operands, but it is 8137 // useful in general for any shift amounts. 8138 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 8139 break; 8140 } 8141 return; 8142 8143 case 'N': 8144 if (Subtarget->isThumb()) { // FIXME thumb2 8145 // This must be a constant between 0 and 31, for shift amounts. 8146 if (CVal >= 0 && CVal <= 31) 8147 break; 8148 } 8149 return; 8150 8151 case 'O': 8152 if (Subtarget->isThumb()) { // FIXME thumb2 8153 // This must be a multiple of 4 between -508 and 508, for 8154 // ADD/SUB sp = sp + immediate. 8155 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 8156 break; 8157 } 8158 return; 8159 } 8160 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 8161 break; 8162 } 8163 8164 if (Result.getNode()) { 8165 Ops.push_back(Result); 8166 return; 8167 } 8168 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8169} 8170 8171bool 8172ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 8173 // The ARM target isn't yet aware of offsets. 8174 return false; 8175} 8176 8177int ARM::getVFPf32Imm(const APFloat &FPImm) { 8178 APInt Imm = FPImm.bitcastToAPInt(); 8179 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; 8180 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 8181 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits 8182 8183 // We can handle 4 bits of mantissa. 8184 // mantissa = (16+UInt(e:f:g:h))/16. 8185 if (Mantissa & 0x7ffff) 8186 return -1; 8187 Mantissa >>= 19; 8188 if ((Mantissa & 0xf) != Mantissa) 8189 return -1; 8190 8191 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 8192 if (Exp < -3 || Exp > 4) 8193 return -1; 8194 Exp = ((Exp+3) & 0x7) ^ 4; 8195 8196 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 8197} 8198 8199int ARM::getVFPf64Imm(const APFloat &FPImm) { 8200 APInt Imm = FPImm.bitcastToAPInt(); 8201 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; 8202 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 8203 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL; 8204 8205 // We can handle 4 bits of mantissa. 8206 // mantissa = (16+UInt(e:f:g:h))/16. 8207 if (Mantissa & 0xffffffffffffLL) 8208 return -1; 8209 Mantissa >>= 48; 8210 if ((Mantissa & 0xf) != Mantissa) 8211 return -1; 8212 8213 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 8214 if (Exp < -3 || Exp > 4) 8215 return -1; 8216 Exp = ((Exp+3) & 0x7) ^ 4; 8217 8218 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 8219} 8220 8221bool ARM::isBitFieldInvertedMask(unsigned v) { 8222 if (v == 0xffffffff) 8223 return 0; 8224 // there can be 1's on either or both "outsides", all the "inside" 8225 // bits must be 0's 8226 unsigned int lsb = 0, msb = 31; 8227 while (v & (1 << msb)) --msb; 8228 while (v & (1 << lsb)) ++lsb; 8229 for (unsigned int i = lsb; i <= msb; ++i) { 8230 if (v & (1 << i)) 8231 return 0; 8232 } 8233 return 1; 8234} 8235 8236/// isFPImmLegal - Returns true if the target can instruction select the 8237/// specified FP immediate natively. If false, the legalizer will 8238/// materialize the FP immediate as a load from a constant pool. 
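/// For example (illustration): 1.0f encodes as imm8 0x70 and 2.0f as 0x00 in
/// the packed form returned by ARM::getVFPf32Imm above, so both are legal
/// "vmov.f32" immediates on VFPv3, while 0.1f needs more than 4 mantissa
/// bits, getVFPf32Imm returns -1, and the value falls back to a
/// constant-pool load.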
8239bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 8240 if (!Subtarget->hasVFP3()) 8241 return false; 8242 if (VT == MVT::f32) 8243 return ARM::getVFPf32Imm(Imm) != -1; 8244 if (VT == MVT::f64) 8245 return ARM::getVFPf64Imm(Imm) != -1; 8246 return false; 8247} 8248 8249/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 8250/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 8251/// specified in the intrinsic calls. 8252bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 8253 const CallInst &I, 8254 unsigned Intrinsic) const { 8255 switch (Intrinsic) { 8256 case Intrinsic::arm_neon_vld1: 8257 case Intrinsic::arm_neon_vld2: 8258 case Intrinsic::arm_neon_vld3: 8259 case Intrinsic::arm_neon_vld4: 8260 case Intrinsic::arm_neon_vld2lane: 8261 case Intrinsic::arm_neon_vld3lane: 8262 case Intrinsic::arm_neon_vld4lane: { 8263 Info.opc = ISD::INTRINSIC_W_CHAIN; 8264 // Conservatively set memVT to the entire set of vectors loaded. 8265 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 8266 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8267 Info.ptrVal = I.getArgOperand(0); 8268 Info.offset = 0; 8269 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8270 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8271 Info.vol = false; // volatile loads with NEON intrinsics not supported 8272 Info.readMem = true; 8273 Info.writeMem = false; 8274 return true; 8275 } 8276 case Intrinsic::arm_neon_vst1: 8277 case Intrinsic::arm_neon_vst2: 8278 case Intrinsic::arm_neon_vst3: 8279 case Intrinsic::arm_neon_vst4: 8280 case Intrinsic::arm_neon_vst2lane: 8281 case Intrinsic::arm_neon_vst3lane: 8282 case Intrinsic::arm_neon_vst4lane: { 8283 Info.opc = ISD::INTRINSIC_VOID; 8284 // Conservatively set memVT to the entire set of vectors stored. 8285 unsigned NumElts = 0; 8286 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 8287 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 8288 if (!ArgTy->isVectorTy()) 8289 break; 8290 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 8291 } 8292 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8293 Info.ptrVal = I.getArgOperand(0); 8294 Info.offset = 0; 8295 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8296 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8297 Info.vol = false; // volatile stores with NEON intrinsics not supported 8298 Info.readMem = false; 8299 Info.writeMem = true; 8300 return true; 8301 } 8302 case Intrinsic::arm_strexd: { 8303 Info.opc = ISD::INTRINSIC_W_CHAIN; 8304 Info.memVT = MVT::i64; 8305 Info.ptrVal = I.getArgOperand(2); 8306 Info.offset = 0; 8307 Info.align = 8; 8308 Info.vol = true; 8309 Info.readMem = false; 8310 Info.writeMem = true; 8311 return true; 8312 } 8313 case Intrinsic::arm_ldrexd: { 8314 Info.opc = ISD::INTRINSIC_W_CHAIN; 8315 Info.memVT = MVT::i64; 8316 Info.ptrVal = I.getArgOperand(0); 8317 Info.offset = 0; 8318 Info.align = 8; 8319 Info.vol = true; 8320 Info.readMem = true; 8321 Info.writeMem = false; 8322 return true; 8323 } 8324 default: 8325 break; 8326 } 8327 8328 return false; 8329} 8330
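// Illustrative sketch (assumes the usual @llvm.arm.neon.vld2 signature; not
// asserting anything beyond the code above): how a call to
// @llvm.arm.neon.vld2.v4i16 maps onto the IntrinsicInfo filled in by
// getTgtMemIntrinsic.  The alignment comes from the trailing i32 argument of
// the intrinsic call, and memVT conservatively covers both returned vectors
// (16 bytes -> v2i64).
//
//   %vld2 = call { <4 x i16>, <4 x i16> }
//           @llvm.arm.neon.vld2.v4i16(i8* %p, i32 8)
//
//   Info.opc     = ISD::INTRINSIC_W_CHAIN
//   Info.memVT   = v2i64          ; the whole d-register pair
//   Info.ptrVal  = %p
//   Info.align   = 8
//   Info.readMem = true, Info.writeMem = false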