ARMISelLowering.cpp revision eaa192af18677c4dc5894e049514d8a6b1d6d7c2
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that ARM uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "arm-isel" 16#include "ARM.h" 17#include "ARMCallingConv.h" 18#include "ARMConstantPoolValue.h" 19#include "ARMISelLowering.h" 20#include "ARMMachineFunctionInfo.h" 21#include "ARMPerfectShuffle.h" 22#include "ARMRegisterInfo.h" 23#include "ARMSubtarget.h" 24#include "ARMTargetMachine.h" 25#include "ARMTargetObjectFile.h" 26#include "MCTargetDesc/ARMAddressingModes.h" 27#include "llvm/CallingConv.h" 28#include "llvm/Constants.h" 29#include "llvm/Function.h" 30#include "llvm/GlobalValue.h" 31#include "llvm/Instruction.h" 32#include "llvm/Instructions.h" 33#include "llvm/Intrinsics.h" 34#include "llvm/Type.h" 35#include "llvm/CodeGen/CallingConvLower.h" 36#include "llvm/CodeGen/IntrinsicLowering.h" 37#include "llvm/CodeGen/MachineBasicBlock.h" 38#include "llvm/CodeGen/MachineFrameInfo.h" 39#include "llvm/CodeGen/MachineFunction.h" 40#include "llvm/CodeGen/MachineInstrBuilder.h" 41#include "llvm/CodeGen/MachineModuleInfo.h" 42#include "llvm/CodeGen/MachineRegisterInfo.h" 43#include "llvm/CodeGen/PseudoSourceValue.h" 44#include "llvm/CodeGen/SelectionDAG.h" 45#include "llvm/MC/MCSectionMachO.h" 46#include "llvm/Target/TargetOptions.h" 47#include "llvm/ADT/VectorExtras.h" 48#include "llvm/ADT/StringExtras.h" 49#include "llvm/ADT/Statistic.h" 50#include "llvm/Support/CommandLine.h" 51#include "llvm/Support/ErrorHandling.h" 52#include "llvm/Support/MathExtras.h" 53#include "llvm/Support/raw_ostream.h" 54#include <sstream> 55using namespace llvm; 56 57STATISTIC(NumTailCalls, "Number of tail calls"); 58STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt"); 59 60// This option should go away when tail calls fully work. 61static cl::opt<bool> 62EnableARMTailCalls("arm-tail-calls", cl::Hidden, 63 cl::desc("Generate tail calls (TEMPORARY OPTION)."), 64 cl::init(false)); 65 66cl::opt<bool> 67EnableARMLongCalls("arm-long-calls", cl::Hidden, 68 cl::desc("Generate calls via indirect call instructions"), 69 cl::init(false)); 70 71static cl::opt<bool> 72ARMInterworking("arm-interworking", cl::Hidden, 73 cl::desc("Enable / disable ARM interworking (for debugging only)"), 74 cl::init(true)); 75 76namespace llvm { 77 class ARMCCState : public CCState { 78 public: 79 ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF, 80 const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs, 81 LLVMContext &C, ParmContext PC) 82 : CCState(CC, isVarArg, MF, TM, locs, C) { 83 assert(((PC == Call) || (PC == Prologue)) && 84 "ARMCCState users must specify whether their context is call" 85 "or prologue generation."); 86 CallOrPrologue = PC; 87 } 88 }; 89} 90 91// The APCS parameter registers. 
92static const unsigned GPRArgRegs[] = { 93 ARM::R0, ARM::R1, ARM::R2, ARM::R3 94}; 95 96void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT, 97 EVT PromotedBitwiseVT) { 98 if (VT != PromotedLdStVT) { 99 setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote); 100 AddPromotedToType (ISD::LOAD, VT.getSimpleVT(), 101 PromotedLdStVT.getSimpleVT()); 102 103 setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); 104 AddPromotedToType (ISD::STORE, VT.getSimpleVT(), 105 PromotedLdStVT.getSimpleVT()); 106 } 107 108 EVT ElemTy = VT.getVectorElementType(); 109 if (ElemTy != MVT::i64 && ElemTy != MVT::f64) 110 setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom); 111 setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom); 112 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom); 113 if (ElemTy == MVT::i32) { 114 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Custom); 115 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Custom); 116 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom); 117 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom); 118 } else { 119 setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand); 120 setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand); 121 setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand); 122 setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand); 123 } 124 setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom); 125 setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom); 126 setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal); 127 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal); 128 setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand); 129 setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand); 130 setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand); 131 if (VT.isInteger()) { 132 setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom); 133 setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom); 134 setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom); 135 } 136 137 // Promote all bit-wise operations. 138 if (VT.isInteger() && VT != PromotedBitwiseVT) { 139 setOperationAction(ISD::AND, VT.getSimpleVT(), Promote); 140 AddPromotedToType (ISD::AND, VT.getSimpleVT(), 141 PromotedBitwiseVT.getSimpleVT()); 142 setOperationAction(ISD::OR, VT.getSimpleVT(), Promote); 143 AddPromotedToType (ISD::OR, VT.getSimpleVT(), 144 PromotedBitwiseVT.getSimpleVT()); 145 setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote); 146 AddPromotedToType (ISD::XOR, VT.getSimpleVT(), 147 PromotedBitwiseVT.getSimpleVT()); 148 } 149 150 // Neon does not support vector divide/remainder operations. 
151 setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand); 152 setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand); 153 setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand); 154 setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand); 155 setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand); 156 setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand); 157} 158 159void ARMTargetLowering::addDRTypeForNEON(EVT VT) { 160 addRegisterClass(VT, ARM::DPRRegisterClass); 161 addTypeForNEON(VT, MVT::f64, MVT::v2i32); 162} 163 164void ARMTargetLowering::addQRTypeForNEON(EVT VT) { 165 addRegisterClass(VT, ARM::QPRRegisterClass); 166 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32); 167} 168 169static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) { 170 if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin()) 171 return new TargetLoweringObjectFileMachO(); 172 173 return new ARMElfTargetObjectFile(); 174} 175 176ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) 177 : TargetLowering(TM, createTLOF(TM)) { 178 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 179 RegInfo = TM.getRegisterInfo(); 180 Itins = TM.getInstrItineraryData(); 181 182 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 183 184 if (Subtarget->isTargetDarwin()) { 185 // Uses VFP for Thumb libfuncs if available. 186 if (Subtarget->isThumb() && Subtarget->hasVFP2()) { 187 // Single-precision floating-point arithmetic. 188 setLibcallName(RTLIB::ADD_F32, "__addsf3vfp"); 189 setLibcallName(RTLIB::SUB_F32, "__subsf3vfp"); 190 setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp"); 191 setLibcallName(RTLIB::DIV_F32, "__divsf3vfp"); 192 193 // Double-precision floating-point arithmetic. 194 setLibcallName(RTLIB::ADD_F64, "__adddf3vfp"); 195 setLibcallName(RTLIB::SUB_F64, "__subdf3vfp"); 196 setLibcallName(RTLIB::MUL_F64, "__muldf3vfp"); 197 setLibcallName(RTLIB::DIV_F64, "__divdf3vfp"); 198 199 // Single-precision comparisons. 200 setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp"); 201 setLibcallName(RTLIB::UNE_F32, "__nesf2vfp"); 202 setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp"); 203 setLibcallName(RTLIB::OLE_F32, "__lesf2vfp"); 204 setLibcallName(RTLIB::OGE_F32, "__gesf2vfp"); 205 setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp"); 206 setLibcallName(RTLIB::UO_F32, "__unordsf2vfp"); 207 setLibcallName(RTLIB::O_F32, "__unordsf2vfp"); 208 209 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 210 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE); 211 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 212 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 213 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 214 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 215 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 216 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 217 218 // Double-precision comparisons. 
219 setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp"); 220 setLibcallName(RTLIB::UNE_F64, "__nedf2vfp"); 221 setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp"); 222 setLibcallName(RTLIB::OLE_F64, "__ledf2vfp"); 223 setLibcallName(RTLIB::OGE_F64, "__gedf2vfp"); 224 setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp"); 225 setLibcallName(RTLIB::UO_F64, "__unorddf2vfp"); 226 setLibcallName(RTLIB::O_F64, "__unorddf2vfp"); 227 228 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 229 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE); 230 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 231 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 232 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 233 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 234 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 235 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 236 237 // Floating-point to integer conversions. 238 // i64 conversions are done via library routines even when generating VFP 239 // instructions, so use the same ones. 240 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp"); 241 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp"); 242 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp"); 243 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp"); 244 245 // Conversions between floating types. 246 setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp"); 247 setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp"); 248 249 // Integer to floating-point conversions. 250 // i64 conversions are done via library routines even when generating VFP 251 // instructions, so use the same ones. 252 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 253 // e.g., __floatunsidf vs. __floatunssidfvfp. 254 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp"); 255 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp"); 256 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp"); 257 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp"); 258 } 259 } 260 261 // These libcalls are not available in 32-bit. 
262 setLibcallName(RTLIB::SHL_I128, 0); 263 setLibcallName(RTLIB::SRL_I128, 0); 264 setLibcallName(RTLIB::SRA_I128, 0); 265 266 if (Subtarget->isAAPCS_ABI()) { 267 // Double-precision floating-point arithmetic helper functions 268 // RTABI chapter 4.1.2, Table 2 269 setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd"); 270 setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv"); 271 setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul"); 272 setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub"); 273 setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS); 274 setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS); 275 setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS); 276 setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS); 277 278 // Double-precision floating-point comparison helper functions 279 // RTABI chapter 4.1.2, Table 3 280 setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq"); 281 setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE); 282 setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq"); 283 setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ); 284 setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt"); 285 setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE); 286 setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple"); 287 setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE); 288 setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge"); 289 setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE); 290 setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt"); 291 setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE); 292 setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun"); 293 setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE); 294 setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun"); 295 setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ); 296 setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS); 297 setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS); 298 setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS); 299 setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS); 300 setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS); 301 setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS); 302 setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS); 303 setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS); 304 305 // Single-precision floating-point arithmetic helper functions 306 // RTABI chapter 4.1.2, Table 4 307 setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd"); 308 setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv"); 309 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 310 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 311 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 313 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 314 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 315 316 // Single-precision floating-point comparison helper functions 317 // RTABI chapter 4.1.2, Table 5 318 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 319 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 320 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 321 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 322 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 323 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 324 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 325 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 326 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 327 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 328 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 329 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 330 setLibcallName(RTLIB::UO_F32, 
"__aeabi_fcmpun"); 331 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 332 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 333 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 334 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 335 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 342 343 // Floating-point to integer conversions. 344 // RTABI chapter 4.1.2, Table 6 345 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 346 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 347 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 348 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 349 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 350 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 351 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 352 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 353 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 354 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 355 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 356 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 357 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 358 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 359 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 360 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 361 362 // Conversions between floating types. 363 // RTABI chapter 4.1.2, Table 7 364 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 365 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 366 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 367 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 368 369 // Integer to floating-point conversions. 
370 // RTABI chapter 4.1.2, Table 8 371 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d"); 372 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d"); 373 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d"); 374 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d"); 375 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f"); 376 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f"); 377 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f"); 378 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f"); 379 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 380 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS); 381 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 382 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS); 383 setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 384 setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS); 385 setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 386 setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS); 387 388 // Long long helper functions 389 // RTABI chapter 4.2, Table 9 390 setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul"); 391 setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod"); 392 setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod"); 393 setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl"); 394 setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr"); 395 setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr"); 396 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS); 397 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS); 398 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS); 399 setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS); 400 setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS); 401 setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS); 402 403 // Integer division functions 404 // RTABI chapter 4.3.1 405 setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv"); 406 setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv"); 407 setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv"); 408 setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv"); 409 setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv"); 410 setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv"); 411 setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS); 412 setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS); 413 setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS); 414 setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS); 415 setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS); 416 setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS); 417 418 // Memory operations 419 // RTABI chapter 4.3.4 420 setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy"); 421 setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove"); 422 setLibcallName(RTLIB::MEMSET, "__aeabi_memset"); 423 } 424 425 // Use divmod compiler-rt calls for iOS 5.0 and later. 
426 if (Subtarget->getTargetTriple().getOS() == Triple::IOS && 427 !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) { 428 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); 429 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); 430 } 431 432 if (Subtarget->isThumb1Only()) 433 addRegisterClass(MVT::i32, ARM::tGPRRegisterClass); 434 else 435 addRegisterClass(MVT::i32, ARM::GPRRegisterClass); 436 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 437 addRegisterClass(MVT::f32, ARM::SPRRegisterClass); 438 if (!Subtarget->isFPOnlySP()) 439 addRegisterClass(MVT::f64, ARM::DPRRegisterClass); 440 441 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 442 } 443 444 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 445 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 446 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 447 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 448 setTruncStoreAction((MVT::SimpleValueType)VT, 449 (MVT::SimpleValueType)InnerVT, Expand); 450 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 451 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 452 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 453 } 454 455 if (Subtarget->hasNEON()) { 456 addDRTypeForNEON(MVT::v2f32); 457 addDRTypeForNEON(MVT::v8i8); 458 addDRTypeForNEON(MVT::v4i16); 459 addDRTypeForNEON(MVT::v2i32); 460 addDRTypeForNEON(MVT::v1i64); 461 462 addQRTypeForNEON(MVT::v4f32); 463 addQRTypeForNEON(MVT::v2f64); 464 addQRTypeForNEON(MVT::v16i8); 465 addQRTypeForNEON(MVT::v8i16); 466 addQRTypeForNEON(MVT::v4i32); 467 addQRTypeForNEON(MVT::v2i64); 468 469 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 470 // neither Neon nor VFP support any arithmetic operations on it. 471 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 472 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 473 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 474 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 475 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 476 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 477 setOperationAction(ISD::SETCC, MVT::v2f64, Expand); 478 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 479 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 480 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 481 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 482 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 483 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand); 484 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 485 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 486 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 487 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 488 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 489 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 490 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 491 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 492 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 493 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 494 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 495 496 // Neon does not support some operations on v1i64 and v2i64 types. 497 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 498 // Custom handling for some quad-vector types to detect VMULL. 
499 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 500 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 501 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 502 // Custom handling for some vector types to avoid expensive expansions 503 setOperationAction(ISD::SDIV, MVT::v4i16, Custom); 504 setOperationAction(ISD::SDIV, MVT::v8i8, Custom); 505 setOperationAction(ISD::UDIV, MVT::v4i16, Custom); 506 setOperationAction(ISD::UDIV, MVT::v8i8, Custom); 507 setOperationAction(ISD::SETCC, MVT::v1i64, Expand); 508 setOperationAction(ISD::SETCC, MVT::v2i64, Expand); 509 // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with 510 // a destination type that is wider than the source. 511 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); 512 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 513 514 setTargetDAGCombine(ISD::INTRINSIC_VOID); 515 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 516 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 517 setTargetDAGCombine(ISD::SHL); 518 setTargetDAGCombine(ISD::SRL); 519 setTargetDAGCombine(ISD::SRA); 520 setTargetDAGCombine(ISD::SIGN_EXTEND); 521 setTargetDAGCombine(ISD::ZERO_EXTEND); 522 setTargetDAGCombine(ISD::ANY_EXTEND); 523 setTargetDAGCombine(ISD::SELECT_CC); 524 setTargetDAGCombine(ISD::BUILD_VECTOR); 525 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 526 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 527 setTargetDAGCombine(ISD::STORE); 528 setTargetDAGCombine(ISD::FP_TO_SINT); 529 setTargetDAGCombine(ISD::FP_TO_UINT); 530 setTargetDAGCombine(ISD::FDIV); 531 532 setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand); 533 } 534 535 computeRegisterProperties(); 536 537 // ARM does not have f32 extending load. 538 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); 539 540 // ARM does not have i1 sign extending load. 541 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 542 543 // ARM supports all 4 flavors of integer indexed load / store. 544 if (!Subtarget->isThumb1Only()) { 545 for (unsigned im = (unsigned)ISD::PRE_INC; 546 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 547 setIndexedLoadAction(im, MVT::i1, Legal); 548 setIndexedLoadAction(im, MVT::i8, Legal); 549 setIndexedLoadAction(im, MVT::i16, Legal); 550 setIndexedLoadAction(im, MVT::i32, Legal); 551 setIndexedStoreAction(im, MVT::i1, Legal); 552 setIndexedStoreAction(im, MVT::i8, Legal); 553 setIndexedStoreAction(im, MVT::i16, Legal); 554 setIndexedStoreAction(im, MVT::i32, Legal); 555 } 556 } 557 558 // i64 operation support. 559 setOperationAction(ISD::MUL, MVT::i64, Expand); 560 setOperationAction(ISD::MULHU, MVT::i32, Expand); 561 if (Subtarget->isThumb1Only()) { 562 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 563 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 564 } 565 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() 566 || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP())) 567 setOperationAction(ISD::MULHS, MVT::i32, Expand); 568 569 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 570 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 571 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 572 setOperationAction(ISD::SRL, MVT::i64, Custom); 573 setOperationAction(ISD::SRA, MVT::i64, Custom); 574 575 if (!Subtarget->isThumb1Only()) { 576 // FIXME: We should do this for Thumb1 as well. 577 setOperationAction(ISD::ADDC, MVT::i32, Custom); 578 setOperationAction(ISD::ADDE, MVT::i32, Custom); 579 setOperationAction(ISD::SUBC, MVT::i32, Custom); 580 setOperationAction(ISD::SUBE, MVT::i32, Custom); 581 } 582 583 // ARM does not have ROTL. 
584 setOperationAction(ISD::ROTL, MVT::i32, Expand); 585 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 586 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 587 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) 588 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 589 590 // Only ARMv6 has BSWAP. 591 if (!Subtarget->hasV6Ops()) 592 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 593 594 // These are expanded into libcalls. 595 if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) { 596 // v7M has a hardware divider 597 setOperationAction(ISD::SDIV, MVT::i32, Expand); 598 setOperationAction(ISD::UDIV, MVT::i32, Expand); 599 } 600 setOperationAction(ISD::SREM, MVT::i32, Expand); 601 setOperationAction(ISD::UREM, MVT::i32, Expand); 602 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 603 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 604 605 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 606 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 607 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); 608 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 609 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 610 611 setOperationAction(ISD::TRAP, MVT::Other, Legal); 612 613 // Use the default implementation. 614 setOperationAction(ISD::VASTART, MVT::Other, Custom); 615 setOperationAction(ISD::VAARG, MVT::Other, Expand); 616 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 617 setOperationAction(ISD::VAEND, MVT::Other, Expand); 618 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 619 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 620 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 621 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 622 setExceptionPointerRegister(ARM::R0); 623 setExceptionSelectorRegister(ARM::R1); 624 625 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 626 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 627 // the default expansion. 628 // FIXME: This should be checking for v6k, not just v6. 629 if (Subtarget->hasDataBarrier() || 630 (Subtarget->hasV6Ops() && !Subtarget->isThumb())) { 631 // membarrier needs custom lowering; the rest are legal and handled 632 // normally. 633 setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); 634 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 635 // Custom lowering for 64-bit ops 636 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 637 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 638 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 639 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 640 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 641 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 642 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 643 // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc. 644 setInsertFencesForAtomic(true); 645 } else { 646 // Set them all for expansion, which will force libcalls. 
647 setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); 648 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); 649 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 650 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 651 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 652 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 653 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 654 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 655 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 656 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 657 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 658 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 659 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 660 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 661 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the 662 // Unordered/Monotonic case. 663 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 664 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 665 // Since the libcalls include locking, fold in the fences 666 setShouldFoldAtomicFences(true); 667 } 668 669 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 670 671 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 672 if (!Subtarget->hasV6Ops()) { 673 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 674 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 675 } 676 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 677 678 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 679 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 680 // iff target supports vfp2. 681 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 682 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 683 } 684 685 // We want to custom lower some of our intrinsics. 
686 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 687 if (Subtarget->isTargetDarwin()) { 688 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 689 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 690 setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom); 691 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); 692 } 693 694 setOperationAction(ISD::SETCC, MVT::i32, Expand); 695 setOperationAction(ISD::SETCC, MVT::f32, Expand); 696 setOperationAction(ISD::SETCC, MVT::f64, Expand); 697 setOperationAction(ISD::SELECT, MVT::i32, Custom); 698 setOperationAction(ISD::SELECT, MVT::f32, Custom); 699 setOperationAction(ISD::SELECT, MVT::f64, Custom); 700 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 701 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 702 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 703 704 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 705 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 706 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 707 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 708 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 709 710 // We don't support sin/cos/fmod/copysign/pow 711 setOperationAction(ISD::FSIN, MVT::f64, Expand); 712 setOperationAction(ISD::FSIN, MVT::f32, Expand); 713 setOperationAction(ISD::FCOS, MVT::f32, Expand); 714 setOperationAction(ISD::FCOS, MVT::f64, Expand); 715 setOperationAction(ISD::FREM, MVT::f64, Expand); 716 setOperationAction(ISD::FREM, MVT::f32, Expand); 717 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { 718 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 719 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 720 } 721 setOperationAction(ISD::FPOW, MVT::f64, Expand); 722 setOperationAction(ISD::FPOW, MVT::f32, Expand); 723 724 setOperationAction(ISD::FMA, MVT::f64, Expand); 725 setOperationAction(ISD::FMA, MVT::f32, Expand); 726 727 // Various VFP goodness 728 if (!UseSoftFloat && !Subtarget->isThumb1Only()) { 729 // int <-> fp are custom expanded into bit_convert + ARMISD ops. 730 if (Subtarget->hasVFP2()) { 731 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 732 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 733 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 734 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 735 } 736 // Special handling for half-precision FP. 737 if (!Subtarget->hasFP16()) { 738 setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand); 739 setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand); 740 } 741 } 742 743 // We have target-specific dag combine patterns for the following nodes: 744 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 745 setTargetDAGCombine(ISD::ADD); 746 setTargetDAGCombine(ISD::SUB); 747 setTargetDAGCombine(ISD::MUL); 748 749 if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) 750 setTargetDAGCombine(ISD::OR); 751 if (Subtarget->hasNEON()) 752 setTargetDAGCombine(ISD::AND); 753 754 setStackPointerRegisterToSaveRestore(ARM::SP); 755 756 if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2()) 757 setSchedulingPreference(Sched::RegPressure); 758 else 759 setSchedulingPreference(Sched::Hybrid); 760 761 //// temporary - rewrite interface to use type 762 maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1; 763 maxStoresPerMemset = 16; 764 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 
8 : 4; 765 766 // On ARM arguments smaller than 4 bytes are extended, so all arguments 767 // are at least 4 bytes aligned. 768 setMinStackArgumentAlignment(4); 769 770 benefitFromCodePlacementOpt = true; 771 772 setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); 773} 774 775// FIXME: It might make sense to define the representative register class as the 776// nearest super-register that has a non-null superset. For example, DPR_VFP2 is 777// a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently, 778// SPR's representative would be DPR_VFP2. This should work well if register 779// pressure tracking were modified such that a register use would increment the 780// pressure of the register class's representative and all of it's super 781// classes' representatives transitively. We have not implemented this because 782// of the difficulty prior to coalescing of modeling operand register classes 783// due to the common occurrence of cross class copies and subregister insertions 784// and extractions. 785std::pair<const TargetRegisterClass*, uint8_t> 786ARMTargetLowering::findRepresentativeClass(EVT VT) const{ 787 const TargetRegisterClass *RRC = 0; 788 uint8_t Cost = 1; 789 switch (VT.getSimpleVT().SimpleTy) { 790 default: 791 return TargetLowering::findRepresentativeClass(VT); 792 // Use DPR as representative register class for all floating point 793 // and vector types. Since there are 32 SPR registers and 32 DPR registers so 794 // the cost is 1 for both f32 and f64. 795 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: 796 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: 797 RRC = ARM::DPRRegisterClass; 798 // When NEON is used for SP, only half of the register file is available 799 // because operations that define both SP and DP results will be constrained 800 // to the VFP2 class (D0-D15). We currently model this constraint prior to 801 // coalescing by double-counting the SP regs. See the FIXME above. 
802 if (Subtarget->useNEONForSinglePrecisionFP()) 803 Cost = 2; 804 break; 805 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 806 case MVT::v4f32: case MVT::v2f64: 807 RRC = ARM::DPRRegisterClass; 808 Cost = 2; 809 break; 810 case MVT::v4i64: 811 RRC = ARM::DPRRegisterClass; 812 Cost = 4; 813 break; 814 case MVT::v8i64: 815 RRC = ARM::DPRRegisterClass; 816 Cost = 8; 817 break; 818 } 819 return std::make_pair(RRC, Cost); 820} 821 822const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 823 switch (Opcode) { 824 default: return 0; 825 case ARMISD::Wrapper: return "ARMISD::Wrapper"; 826 case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN"; 827 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC"; 828 case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; 829 case ARMISD::CALL: return "ARMISD::CALL"; 830 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED"; 831 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; 832 case ARMISD::tCALL: return "ARMISD::tCALL"; 833 case ARMISD::BRCOND: return "ARMISD::BRCOND"; 834 case ARMISD::BR_JT: return "ARMISD::BR_JT"; 835 case ARMISD::BR2_JT: return "ARMISD::BR2_JT"; 836 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; 837 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; 838 case ARMISD::CMP: return "ARMISD::CMP"; 839 case ARMISD::CMPZ: return "ARMISD::CMPZ"; 840 case ARMISD::CMPFP: return "ARMISD::CMPFP"; 841 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; 842 case ARMISD::BCC_i64: return "ARMISD::BCC_i64"; 843 case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; 844 case ARMISD::CMOV: return "ARMISD::CMOV"; 845 846 case ARMISD::RBIT: return "ARMISD::RBIT"; 847 848 case ARMISD::FTOSI: return "ARMISD::FTOSI"; 849 case ARMISD::FTOUI: return "ARMISD::FTOUI"; 850 case ARMISD::SITOF: return "ARMISD::SITOF"; 851 case ARMISD::UITOF: return "ARMISD::UITOF"; 852 853 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; 854 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; 855 case ARMISD::RRX: return "ARMISD::RRX"; 856 857 case ARMISD::ADDC: return "ARMISD::ADDC"; 858 case ARMISD::ADDE: return "ARMISD::ADDE"; 859 case ARMISD::SUBC: return "ARMISD::SUBC"; 860 case ARMISD::SUBE: return "ARMISD::SUBE"; 861 862 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; 863 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; 864 865 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; 866 case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP"; 867 case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP"; 868 869 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; 870 871 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER"; 872 873 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; 874 875 case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER"; 876 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; 877 878 case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; 879 880 case ARMISD::VCEQ: return "ARMISD::VCEQ"; 881 case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; 882 case ARMISD::VCGE: return "ARMISD::VCGE"; 883 case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; 884 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 885 case ARMISD::VCGEU: return "ARMISD::VCGEU"; 886 case ARMISD::VCGT: return "ARMISD::VCGT"; 887 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 888 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 889 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 890 case ARMISD::VTST: return "ARMISD::VTST"; 891 892 case ARMISD::VSHL: return "ARMISD::VSHL"; 893 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 894 case ARMISD::VSHRu: return 
"ARMISD::VSHRu"; 895 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 896 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 897 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 898 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 899 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 900 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 901 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 902 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 903 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 904 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 905 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 906 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 907 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 908 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 909 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 910 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 911 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 912 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 913 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 914 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 915 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 916 case ARMISD::VDUP: return "ARMISD::VDUP"; 917 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 918 case ARMISD::VEXT: return "ARMISD::VEXT"; 919 case ARMISD::VREV64: return "ARMISD::VREV64"; 920 case ARMISD::VREV32: return "ARMISD::VREV32"; 921 case ARMISD::VREV16: return "ARMISD::VREV16"; 922 case ARMISD::VZIP: return "ARMISD::VZIP"; 923 case ARMISD::VUZP: return "ARMISD::VUZP"; 924 case ARMISD::VTRN: return "ARMISD::VTRN"; 925 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 926 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 927 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 928 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 929 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 930 case ARMISD::FMAX: return "ARMISD::FMAX"; 931 case ARMISD::FMIN: return "ARMISD::FMIN"; 932 case ARMISD::BFI: return "ARMISD::BFI"; 933 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 934 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 935 case ARMISD::VBSL: return "ARMISD::VBSL"; 936 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 937 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 938 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 939 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 940 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 941 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 942 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 943 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 944 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 945 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 946 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 947 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 948 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 949 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 950 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 951 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 952 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 953 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 954 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 955 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 956 } 957} 958 959EVT ARMTargetLowering::getSetCCResultType(EVT VT) const { 960 if (!VT.isVector()) return getPointerTy(); 961 return VT.changeVectorElementTypeToInteger(); 962} 963 964/// getRegClassFor - Return the register class that should be used for the 965/// specified value type. 
966TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 967 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 968 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 969 // load / store 4 to 8 consecutive D registers. 970 if (Subtarget->hasNEON()) { 971 if (VT == MVT::v4i64) 972 return ARM::QQPRRegisterClass; 973 else if (VT == MVT::v8i64) 974 return ARM::QQQQPRRegisterClass; 975 } 976 return TargetLowering::getRegClassFor(VT); 977} 978 979// Create a fast isel object. 980FastISel * 981ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 982 return ARM::createFastISel(funcInfo); 983} 984 985/// getMaximalGlobalOffset - Returns the maximal possible offset which can 986/// be used for loads / stores from the global. 987unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 988 return (Subtarget->isThumb1Only() ? 127 : 4095); 989} 990 991Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { 992 unsigned NumVals = N->getNumValues(); 993 if (!NumVals) 994 return Sched::RegPressure; 995 996 for (unsigned i = 0; i != NumVals; ++i) { 997 EVT VT = N->getValueType(i); 998 if (VT == MVT::Glue || VT == MVT::Other) 999 continue; 1000 if (VT.isFloatingPoint() || VT.isVector()) 1001 return Sched::ILP; 1002 } 1003 1004 if (!N->isMachineOpcode()) 1005 return Sched::RegPressure; 1006 1007 // Load are scheduled for latency even if there instruction itinerary 1008 // is not available. 1009 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 1010 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); 1011 1012 if (MCID.getNumDefs() == 0) 1013 return Sched::RegPressure; 1014 if (!Itins->isEmpty() && 1015 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) 1016 return Sched::ILP; 1017 1018 return Sched::RegPressure; 1019} 1020 1021//===----------------------------------------------------------------------===// 1022// Lowering Code 1023//===----------------------------------------------------------------------===// 1024 1025/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 1026static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 1027 switch (CC) { 1028 default: llvm_unreachable("Unknown condition code!"); 1029 case ISD::SETNE: return ARMCC::NE; 1030 case ISD::SETEQ: return ARMCC::EQ; 1031 case ISD::SETGT: return ARMCC::GT; 1032 case ISD::SETGE: return ARMCC::GE; 1033 case ISD::SETLT: return ARMCC::LT; 1034 case ISD::SETLE: return ARMCC::LE; 1035 case ISD::SETUGT: return ARMCC::HI; 1036 case ISD::SETUGE: return ARMCC::HS; 1037 case ISD::SETULT: return ARMCC::LO; 1038 case ISD::SETULE: return ARMCC::LS; 1039 } 1040} 1041 1042/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
1043static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 1044 ARMCC::CondCodes &CondCode2) { 1045 CondCode2 = ARMCC::AL; 1046 switch (CC) { 1047 default: llvm_unreachable("Unknown FP condition!"); 1048 case ISD::SETEQ: 1049 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 1050 case ISD::SETGT: 1051 case ISD::SETOGT: CondCode = ARMCC::GT; break; 1052 case ISD::SETGE: 1053 case ISD::SETOGE: CondCode = ARMCC::GE; break; 1054 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1055 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1056 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1057 case ISD::SETO: CondCode = ARMCC::VC; break; 1058 case ISD::SETUO: CondCode = ARMCC::VS; break; 1059 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1060 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1061 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1062 case ISD::SETLT: 1063 case ISD::SETULT: CondCode = ARMCC::LT; break; 1064 case ISD::SETLE: 1065 case ISD::SETULE: CondCode = ARMCC::LE; break; 1066 case ISD::SETNE: 1067 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1068 } 1069} 1070 1071//===----------------------------------------------------------------------===// 1072// Calling Convention Implementation 1073//===----------------------------------------------------------------------===// 1074 1075#include "ARMGenCallingConv.inc" 1076 1077/// CCAssignFnForNode - Selects the correct CCAssignFn for a the 1078/// given CallingConvention value. 1079CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 1080 bool Return, 1081 bool isVarArg) const { 1082 switch (CC) { 1083 default: 1084 llvm_unreachable("Unsupported calling convention"); 1085 case CallingConv::Fast: 1086 if (Subtarget->hasVFP2() && !isVarArg) { 1087 if (!Subtarget->isAAPCS_ABI()) 1088 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1089 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1090 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1091 } 1092 // Fallthrough 1093 case CallingConv::C: { 1094 // Use target triple & subtarget features to do actual dispatch. 1095 if (!Subtarget->isAAPCS_ABI()) 1096 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1097 else if (Subtarget->hasVFP2() && 1098 FloatABIType == FloatABI::Hard && !isVarArg) 1099 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1100 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1101 } 1102 case CallingConv::ARM_AAPCS_VFP: 1103 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1104 case CallingConv::ARM_AAPCS: 1105 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1106 case CallingConv::ARM_APCS: 1107 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1108 } 1109} 1110 1111/// LowerCallResult - Lower the result values of a call into the 1112/// appropriate copies out of appropriate physical registers. 1113SDValue 1114ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1115 CallingConv::ID CallConv, bool isVarArg, 1116 const SmallVectorImpl<ISD::InputArg> &Ins, 1117 DebugLoc dl, SelectionDAG &DAG, 1118 SmallVectorImpl<SDValue> &InVals) const { 1119 1120 // Assign locations to each value returned by this call. 
1121 SmallVector<CCValAssign, 16> RVLocs; 1122 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1123 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1124 CCInfo.AnalyzeCallResult(Ins, 1125 CCAssignFnForNode(CallConv, /* Return*/ true, 1126 isVarArg)); 1127 1128 // Copy all of the result registers out of their specified physreg. 1129 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1130 CCValAssign VA = RVLocs[i]; 1131 1132 SDValue Val; 1133 if (VA.needsCustom()) { 1134 // Handle f64 or half of a v2f64. 1135 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1136 InFlag); 1137 Chain = Lo.getValue(1); 1138 InFlag = Lo.getValue(2); 1139 VA = RVLocs[++i]; // skip ahead to next loc 1140 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1141 InFlag); 1142 Chain = Hi.getValue(1); 1143 InFlag = Hi.getValue(2); 1144 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1145 1146 if (VA.getLocVT() == MVT::v2f64) { 1147 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 1148 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1149 DAG.getConstant(0, MVT::i32)); 1150 1151 VA = RVLocs[++i]; // skip ahead to next loc 1152 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1153 Chain = Lo.getValue(1); 1154 InFlag = Lo.getValue(2); 1155 VA = RVLocs[++i]; // skip ahead to next loc 1156 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1157 Chain = Hi.getValue(1); 1158 InFlag = Hi.getValue(2); 1159 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1160 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1161 DAG.getConstant(1, MVT::i32)); 1162 } 1163 } else { 1164 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1165 InFlag); 1166 Chain = Val.getValue(1); 1167 InFlag = Val.getValue(2); 1168 } 1169 1170 switch (VA.getLocInfo()) { 1171 default: llvm_unreachable("Unknown loc info!"); 1172 case CCValAssign::Full: break; 1173 case CCValAssign::BCvt: 1174 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1175 break; 1176 } 1177 1178 InVals.push_back(Val); 1179 } 1180 1181 return Chain; 1182} 1183 1184/// LowerMemOpCallTo - Store the argument to the stack. 
1185SDValue 1186ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, 1187 SDValue StackPtr, SDValue Arg, 1188 DebugLoc dl, SelectionDAG &DAG, 1189 const CCValAssign &VA, 1190 ISD::ArgFlagsTy Flags) const { 1191 unsigned LocMemOffset = VA.getLocMemOffset(); 1192 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1193 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1194 return DAG.getStore(Chain, dl, Arg, PtrOff, 1195 MachinePointerInfo::getStack(LocMemOffset), 1196 false, false, 0); 1197} 1198 1199void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, 1200 SDValue Chain, SDValue &Arg, 1201 RegsToPassVector &RegsToPass, 1202 CCValAssign &VA, CCValAssign &NextVA, 1203 SDValue &StackPtr, 1204 SmallVector<SDValue, 8> &MemOpChains, 1205 ISD::ArgFlagsTy Flags) const { 1206 1207 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1208 DAG.getVTList(MVT::i32, MVT::i32), Arg); 1209 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd)); 1210 1211 if (NextVA.isRegLoc()) 1212 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1))); 1213 else { 1214 assert(NextVA.isMemLoc()); 1215 if (StackPtr.getNode() == 0) 1216 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1217 1218 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1), 1219 dl, DAG, NextVA, 1220 Flags)); 1221 } 1222} 1223 1224/// LowerCall - Lowering a call into a callseq_start <- 1225/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter 1226/// nodes. 1227SDValue 1228ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1229 CallingConv::ID CallConv, bool isVarArg, 1230 bool &isTailCall, 1231 const SmallVectorImpl<ISD::OutputArg> &Outs, 1232 const SmallVectorImpl<SDValue> &OutVals, 1233 const SmallVectorImpl<ISD::InputArg> &Ins, 1234 DebugLoc dl, SelectionDAG &DAG, 1235 SmallVectorImpl<SDValue> &InVals) const { 1236 MachineFunction &MF = DAG.getMachineFunction(); 1237 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); 1238 bool IsSibCall = false; 1239 // Disable tail calls if they're not supported. 1240 if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) 1241 isTailCall = false; 1242 if (isTailCall) { 1243 // Check if it's really possible to do a tail call. 1244 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1245 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1246 Outs, OutVals, Ins, DAG); 1247 // We don't support GuaranteedTailCallOpt for ARM, only automatically 1248 // detected sibcalls. 1249 if (isTailCall) { 1250 ++NumTailCalls; 1251 IsSibCall = true; 1252 } 1253 } 1254 1255 // Analyze operands of the call, assigning locations to each operand. 1256 SmallVector<CCValAssign, 16> ArgLocs; 1257 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1258 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1259 CCInfo.AnalyzeCallOperands(Outs, 1260 CCAssignFnForNode(CallConv, /* Return*/ false, 1261 isVarArg)); 1262 1263 // Get a count of how many bytes are to be pushed on the stack. 1264 unsigned NumBytes = CCInfo.getNextStackOffset(); 1265 1266 // For tail calls, memory operands are available in our caller's stack. 1267 if (IsSibCall) 1268 NumBytes = 0; 1269 1270 // Adjust the stack pointer for the new arguments... 
1271 // These operations are automatically eliminated by the prolog/epilog pass 1272 if (!IsSibCall) 1273 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1274 1275 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1276 1277 RegsToPassVector RegsToPass; 1278 SmallVector<SDValue, 8> MemOpChains; 1279 1280 // Walk the register/memloc assignments, inserting copies/loads. In the case 1281 // of tail call optimization, arguments are handled later. 1282 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1283 i != e; 1284 ++i, ++realArgIdx) { 1285 CCValAssign &VA = ArgLocs[i]; 1286 SDValue Arg = OutVals[realArgIdx]; 1287 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1288 bool isByVal = Flags.isByVal(); 1289 1290 // Promote the value if needed. 1291 switch (VA.getLocInfo()) { 1292 default: llvm_unreachable("Unknown loc info!"); 1293 case CCValAssign::Full: break; 1294 case CCValAssign::SExt: 1295 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1296 break; 1297 case CCValAssign::ZExt: 1298 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1299 break; 1300 case CCValAssign::AExt: 1301 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1302 break; 1303 case CCValAssign::BCvt: 1304 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1305 break; 1306 } 1307 1308 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1309 if (VA.needsCustom()) { 1310 if (VA.getLocVT() == MVT::v2f64) { 1311 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1312 DAG.getConstant(0, MVT::i32)); 1313 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1314 DAG.getConstant(1, MVT::i32)); 1315 1316 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1317 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1318 1319 VA = ArgLocs[++i]; // skip ahead to next loc 1320 if (VA.isRegLoc()) { 1321 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1322 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1323 } else { 1324 assert(VA.isMemLoc()); 1325 1326 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1327 dl, DAG, VA, Flags)); 1328 } 1329 } else { 1330 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1331 StackPtr, MemOpChains, Flags); 1332 } 1333 } else if (VA.isRegLoc()) { 1334 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1335 } else if (isByVal) { 1336 assert(VA.isMemLoc()); 1337 unsigned offset = 0; 1338 1339 // True if this byval aggregate will be split between registers 1340 // and memory. 
1341 if (CCInfo.isFirstByValRegValid()) { 1342 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1343 unsigned int i, j; 1344 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1345 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1346 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1347 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1348 MachinePointerInfo(), 1349 false, false, false, 0); 1350 MemOpChains.push_back(Load.getValue(1)); 1351 RegsToPass.push_back(std::make_pair(j, Load)); 1352 } 1353 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1354 CCInfo.clearFirstByValReg(); 1355 } 1356 1357 unsigned LocMemOffset = VA.getLocMemOffset(); 1358 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1359 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1360 StkPtrOff); 1361 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1362 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1363 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1364 MVT::i32); 1365 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1366 Flags.getByValAlign(), 1367 /*isVolatile=*/false, 1368 /*AlwaysInline=*/false, 1369 MachinePointerInfo(0), 1370 MachinePointerInfo(0))); 1371 1372 } else if (!IsSibCall) { 1373 assert(VA.isMemLoc()); 1374 1375 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1376 dl, DAG, VA, Flags)); 1377 } 1378 } 1379 1380 if (!MemOpChains.empty()) 1381 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1382 &MemOpChains[0], MemOpChains.size()); 1383 1384 // Build a sequence of copy-to-reg nodes chained together with token chain 1385 // and flag operands which copy the outgoing args into the appropriate regs. 1386 SDValue InFlag; 1387 // Tail call byval lowering might overwrite argument registers so in case of 1388 // tail call optimization the copies to registers are lowered later. 1389 if (!isTailCall) 1390 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1391 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1392 RegsToPass[i].second, InFlag); 1393 InFlag = Chain.getValue(1); 1394 } 1395 1396 // For tail calls lower the arguments to the 'real' stack slot. 1397 if (isTailCall) { 1398 // Force all the incoming stack arguments to be loaded from the stack 1399 // before any new outgoing arguments are stored to the stack, because the 1400 // outgoing stack slots may alias the incoming argument stack slots, and 1401 // the alias isn't otherwise explicit. This is slightly more conservative 1402 // than necessary, because it means that each store effectively depends 1403 // on every argument instead of just those arguments it would clobber. 1404 1405 // Do not flag preceding copytoreg stuff together with the following stuff. 1406 InFlag = SDValue(); 1407 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1408 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1409 RegsToPass[i].second, InFlag); 1410 InFlag = Chain.getValue(1); 1411 } 1412 InFlag =SDValue(); 1413 } 1414 1415 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1416 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1417 // node so that legalize doesn't hack it. 
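  // Illustrative only: for a direct call such as
  //   call void @foo()
  // the GlobalAddressSDNode for @foo is rewritten below into either a
  // TargetGlobalAddress (possibly carrying a PLT operand flag) or a
  // constant-pool load of its address, depending on the relocation model,
  // the subtarget, and whether EnableARMLongCalls forces indirect calls.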
1418 bool isDirect = false; 1419 bool isARMFunc = false; 1420 bool isLocalARMFunc = false; 1421 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1422 1423 if (EnableARMLongCalls) { 1424 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1425 && "long-calls with non-static relocation model!"); 1426 // Handle a global address or an external symbol. If it's not one of 1427 // those, the target's already in a register, so we don't need to do 1428 // anything extra. 1429 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1430 const GlobalValue *GV = G->getGlobal(); 1431 // Create a constant pool entry for the callee address 1432 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1433 ARMConstantPoolValue *CPV = 1434 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1435 1436 // Get the address of the callee into a register 1437 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1438 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1439 Callee = DAG.getLoad(getPointerTy(), dl, 1440 DAG.getEntryNode(), CPAddr, 1441 MachinePointerInfo::getConstantPool(), 1442 false, false, false, 0); 1443 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1444 const char *Sym = S->getSymbol(); 1445 1446 // Create a constant pool entry for the callee address 1447 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1448 ARMConstantPoolValue *CPV = 1449 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1450 ARMPCLabelIndex, 0); 1451 // Get the address of the callee into a register 1452 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1453 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1454 Callee = DAG.getLoad(getPointerTy(), dl, 1455 DAG.getEntryNode(), CPAddr, 1456 MachinePointerInfo::getConstantPool(), 1457 false, false, false, 0); 1458 } 1459 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1460 const GlobalValue *GV = G->getGlobal(); 1461 isDirect = true; 1462 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1463 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1464 getTargetMachine().getRelocationModel() != Reloc::Static; 1465 isARMFunc = !Subtarget->isThumb() || isStub; 1466 // ARM call to a local ARM function is predicable. 1467 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1468 // tBX takes a register source operand. 
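    // Roughly, the sequence built below for that case is (illustrative):
    //   ldr  rN, <constant pool entry for the callee>
    //   add  rN, pc        ; ARMISD::PIC_ADD resolves the pc-relative reference
    // so the call can then be made indirectly through rN.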
1469 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1470 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1471 ARMConstantPoolValue *CPV = 1472 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1473 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1474 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1475 Callee = DAG.getLoad(getPointerTy(), dl, 1476 DAG.getEntryNode(), CPAddr, 1477 MachinePointerInfo::getConstantPool(), 1478 false, false, false, 0); 1479 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1480 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1481 getPointerTy(), Callee, PICLabel); 1482 } else { 1483 // On ELF targets for PIC code, direct calls should go through the PLT 1484 unsigned OpFlags = 0; 1485 if (Subtarget->isTargetELF() && 1486 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1487 OpFlags = ARMII::MO_PLT; 1488 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1489 } 1490 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1491 isDirect = true; 1492 bool isStub = Subtarget->isTargetDarwin() && 1493 getTargetMachine().getRelocationModel() != Reloc::Static; 1494 isARMFunc = !Subtarget->isThumb() || isStub; 1495 // tBX takes a register source operand. 1496 const char *Sym = S->getSymbol(); 1497 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1498 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1499 ARMConstantPoolValue *CPV = 1500 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1501 ARMPCLabelIndex, 4); 1502 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1503 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1504 Callee = DAG.getLoad(getPointerTy(), dl, 1505 DAG.getEntryNode(), CPAddr, 1506 MachinePointerInfo::getConstantPool(), 1507 false, false, false, 0); 1508 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1509 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1510 getPointerTy(), Callee, PICLabel); 1511 } else { 1512 unsigned OpFlags = 0; 1513 // On ELF targets for PIC code, direct calls should go through the PLT 1514 if (Subtarget->isTargetELF() && 1515 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1516 OpFlags = ARMII::MO_PLT; 1517 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1518 } 1519 } 1520 1521 // FIXME: handle tail calls differently. 1522 unsigned CallOpc; 1523 if (Subtarget->isThumb()) { 1524 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1525 CallOpc = ARMISD::CALL_NOLINK; 1526 else 1527 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1528 } else { 1529 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1530 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1531 : ARMISD::CALL_NOLINK; 1532 } 1533 1534 std::vector<SDValue> Ops; 1535 Ops.push_back(Chain); 1536 Ops.push_back(Callee); 1537 1538 // Add argument registers to the end of the list so that they are known live 1539 // into the call. 1540 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1541 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1542 RegsToPass[i].second.getValueType())); 1543 1544 if (InFlag.getNode()) 1545 Ops.push_back(InFlag); 1546 1547 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1548 if (isTailCall) 1549 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1550 1551 // Returns a chain and a flag for retval copy to use. 
1552 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1553 InFlag = Chain.getValue(1); 1554 1555 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1556 DAG.getIntPtrConstant(0, true), InFlag); 1557 if (!Ins.empty()) 1558 InFlag = Chain.getValue(1); 1559 1560 // Handle result values, copying them out of physregs into vregs that we 1561 // return. 1562 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1563 dl, DAG, InVals); 1564} 1565 1566/// HandleByVal - Every parameter *after* a byval parameter is passed 1567/// on the stack. Remember the next parameter register to allocate, 1568/// and then confiscate the rest of the parameter registers to insure 1569/// this. 1570void 1571llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const { 1572 unsigned reg = State->AllocateReg(GPRArgRegs, 4); 1573 assert((State->getCallOrPrologue() == Prologue || 1574 State->getCallOrPrologue() == Call) && 1575 "unhandled ParmContext"); 1576 if ((!State->isFirstByValRegValid()) && 1577 (ARM::R0 <= reg) && (reg <= ARM::R3)) { 1578 State->setFirstByValReg(reg); 1579 // At a call site, a byval parameter that is split between 1580 // registers and memory needs its size truncated here. In a 1581 // function prologue, such byval parameters are reassembled in 1582 // memory, and are not truncated. 1583 if (State->getCallOrPrologue() == Call) { 1584 unsigned excess = 4 * (ARM::R4 - reg); 1585 assert(size >= excess && "expected larger existing stack allocation"); 1586 size -= excess; 1587 } 1588 } 1589 // Confiscate any remaining parameter registers to preclude their 1590 // assignment to subsequent parameters. 1591 while (State->AllocateReg(GPRArgRegs, 4)) 1592 ; 1593} 1594 1595/// MatchingStackOffset - Return true if the given stack call argument is 1596/// already available in the same position (relatively) of the caller's 1597/// incoming argument stack. 1598static 1599bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1600 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1601 const ARMInstrInfo *TII) { 1602 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1603 int FI = INT_MAX; 1604 if (Arg.getOpcode() == ISD::CopyFromReg) { 1605 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1606 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1607 return false; 1608 MachineInstr *Def = MRI->getVRegDef(VR); 1609 if (!Def) 1610 return false; 1611 if (!Flags.isByVal()) { 1612 if (!TII->isLoadFromStackSlot(Def, FI)) 1613 return false; 1614 } else { 1615 return false; 1616 } 1617 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1618 if (Flags.isByVal()) 1619 // ByVal argument is passed in as a pointer but it's now being 1620 // dereferenced. e.g. 1621 // define @foo(%struct.X* %A) { 1622 // tail call @bar(%struct.X* byval %A) 1623 // } 1624 return false; 1625 SDValue Ptr = Ld->getBasePtr(); 1626 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1627 if (!FINode) 1628 return false; 1629 FI = FINode->getIndex(); 1630 } else 1631 return false; 1632 1633 assert(FI != INT_MAX); 1634 if (!MFI->isFixedObjectIndex(FI)) 1635 return false; 1636 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1637} 1638 1639/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1640/// for tail call optimization. Targets which want to do tail call 1641/// optimization should implement this function. 
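/// For example (illustrative): a call whose arguments all fit in registers,
/// made from a non-Thumb1 caller with a matching (or result-compatible)
/// calling convention, can become a sibcall; variadic calls that pass
/// arguments, calls involving struct return, and all Thumb1 callers are
/// rejected below.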
1642bool 1643ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1644 CallingConv::ID CalleeCC, 1645 bool isVarArg, 1646 bool isCalleeStructRet, 1647 bool isCallerStructRet, 1648 const SmallVectorImpl<ISD::OutputArg> &Outs, 1649 const SmallVectorImpl<SDValue> &OutVals, 1650 const SmallVectorImpl<ISD::InputArg> &Ins, 1651 SelectionDAG& DAG) const { 1652 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1653 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1654 bool CCMatch = CallerCC == CalleeCC; 1655 1656 // Look for obvious safe cases to perform tail call optimization that do not 1657 // require ABI changes. This is what gcc calls sibcall. 1658 1659 // Do not sibcall optimize vararg calls unless the call site is not passing 1660 // any arguments. 1661 if (isVarArg && !Outs.empty()) 1662 return false; 1663 1664 // Also avoid sibcall optimization if either caller or callee uses struct 1665 // return semantics. 1666 if (isCalleeStructRet || isCallerStructRet) 1667 return false; 1668 1669 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1670 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1671 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1672 // support in the assembler and linker to be used. This would need to be 1673 // fixed to fully support tail calls in Thumb1. 1674 // 1675 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1676 // LR. This means if we need to reload LR, it takes an extra instructions, 1677 // which outweighs the value of the tail call; but here we don't know yet 1678 // whether LR is going to be used. Probably the right approach is to 1679 // generate the tail call here and turn it back into CALL/RET in 1680 // emitEpilogue if LR is used. 1681 1682 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1683 // but we need to make sure there are enough registers; the only valid 1684 // registers are the 4 used for parameters. We don't currently do this 1685 // case. 1686 if (Subtarget->isThumb1Only()) 1687 return false; 1688 1689 // If the calling conventions do not match, then we'd better make sure the 1690 // results are returned in the same way as what the caller expects. 1691 if (!CCMatch) { 1692 SmallVector<CCValAssign, 16> RVLocs1; 1693 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1694 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1695 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1696 1697 SmallVector<CCValAssign, 16> RVLocs2; 1698 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1699 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1700 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1701 1702 if (RVLocs1.size() != RVLocs2.size()) 1703 return false; 1704 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1705 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1706 return false; 1707 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1708 return false; 1709 if (RVLocs1[i].isRegLoc()) { 1710 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1711 return false; 1712 } else { 1713 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1714 return false; 1715 } 1716 } 1717 } 1718 1719 // If the callee takes no arguments then go on to check the results of the 1720 // call. 1721 if (!Outs.empty()) { 1722 // Check if stack adjustment is needed. 
For now, do not do this if any 1723 // argument is passed on the stack. 1724 SmallVector<CCValAssign, 16> ArgLocs; 1725 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1726 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1727 CCInfo.AnalyzeCallOperands(Outs, 1728 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1729 if (CCInfo.getNextStackOffset()) { 1730 MachineFunction &MF = DAG.getMachineFunction(); 1731 1732 // Check if the arguments are already laid out in the right way as 1733 // the caller's fixed stack objects. 1734 MachineFrameInfo *MFI = MF.getFrameInfo(); 1735 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1736 const ARMInstrInfo *TII = 1737 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1738 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1739 i != e; 1740 ++i, ++realArgIdx) { 1741 CCValAssign &VA = ArgLocs[i]; 1742 EVT RegVT = VA.getLocVT(); 1743 SDValue Arg = OutVals[realArgIdx]; 1744 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1745 if (VA.getLocInfo() == CCValAssign::Indirect) 1746 return false; 1747 if (VA.needsCustom()) { 1748 // f64 and vector types are split into multiple registers or 1749 // register/stack-slot combinations. The types will not match 1750 // the registers; give up on memory f64 refs until we figure 1751 // out what to do about this. 1752 if (!VA.isRegLoc()) 1753 return false; 1754 if (!ArgLocs[++i].isRegLoc()) 1755 return false; 1756 if (RegVT == MVT::v2f64) { 1757 if (!ArgLocs[++i].isRegLoc()) 1758 return false; 1759 if (!ArgLocs[++i].isRegLoc()) 1760 return false; 1761 } 1762 } else if (!VA.isRegLoc()) { 1763 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1764 MFI, MRI, TII)) 1765 return false; 1766 } 1767 } 1768 } 1769 } 1770 1771 return true; 1772} 1773 1774SDValue 1775ARMTargetLowering::LowerReturn(SDValue Chain, 1776 CallingConv::ID CallConv, bool isVarArg, 1777 const SmallVectorImpl<ISD::OutputArg> &Outs, 1778 const SmallVectorImpl<SDValue> &OutVals, 1779 DebugLoc dl, SelectionDAG &DAG) const { 1780 1781 // CCValAssign - represent the assignment of the return value to a location. 1782 SmallVector<CCValAssign, 16> RVLocs; 1783 1784 // CCState - Info about the registers and stack slots. 1785 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1786 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1787 1788 // Analyze outgoing return values. 1789 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1790 isVarArg)); 1791 1792 // If this is the first return lowered for this function, add 1793 // the regs to the liveout set for the function. 1794 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1795 for (unsigned i = 0; i != RVLocs.size(); ++i) 1796 if (RVLocs[i].isRegLoc()) 1797 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1798 } 1799 1800 SDValue Flag; 1801 1802 // Copy the result values into the output registers. 1803 for (unsigned i = 0, realRVLocIdx = 0; 1804 i != RVLocs.size(); 1805 ++i, ++realRVLocIdx) { 1806 CCValAssign &VA = RVLocs[i]; 1807 assert(VA.isRegLoc() && "Can only return in registers!"); 1808 1809 SDValue Arg = OutVals[realRVLocIdx]; 1810 1811 switch (VA.getLocInfo()) { 1812 default: llvm_unreachable("Unknown loc info!"); 1813 case CCValAssign::Full: break; 1814 case CCValAssign::BCvt: 1815 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1816 break; 1817 } 1818 1819 if (VA.needsCustom()) { 1820 if (VA.getLocVT() == MVT::v2f64) { 1821 // Extract the first half and return it in two registers. 
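      // (Illustrative: a v2f64 return value therefore travels back in four
      // GPRs, each f64 lane being moved out of the VFP register file with
      // ARMISD::VMOVRRD.)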
1822 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1823 DAG.getConstant(0, MVT::i32)); 1824 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1825 DAG.getVTList(MVT::i32, MVT::i32), Half); 1826 1827 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1828 Flag = Chain.getValue(1); 1829 VA = RVLocs[++i]; // skip ahead to next loc 1830 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1831 HalfGPRs.getValue(1), Flag); 1832 Flag = Chain.getValue(1); 1833 VA = RVLocs[++i]; // skip ahead to next loc 1834 1835 // Extract the 2nd half and fall through to handle it as an f64 value. 1836 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1837 DAG.getConstant(1, MVT::i32)); 1838 } 1839 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1840 // available. 1841 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1842 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1843 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1844 Flag = Chain.getValue(1); 1845 VA = RVLocs[++i]; // skip ahead to next loc 1846 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1847 Flag); 1848 } else 1849 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1850 1851 // Guarantee that all emitted copies are 1852 // stuck together, avoiding something bad. 1853 Flag = Chain.getValue(1); 1854 } 1855 1856 SDValue result; 1857 if (Flag.getNode()) 1858 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1859 else // Return Void 1860 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1861 1862 return result; 1863} 1864 1865bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1866 if (N->getNumValues() != 1) 1867 return false; 1868 if (!N->hasNUsesOfValue(1, 0)) 1869 return false; 1870 1871 unsigned NumCopies = 0; 1872 SDNode* Copies[2]; 1873 SDNode *Use = *N->use_begin(); 1874 if (Use->getOpcode() == ISD::CopyToReg) { 1875 Copies[NumCopies++] = Use; 1876 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1877 // f64 returned in a pair of GPRs. 1878 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1879 UI != UE; ++UI) { 1880 if (UI->getOpcode() != ISD::CopyToReg) 1881 return false; 1882 Copies[UI.getUse().getResNo()] = *UI; 1883 ++NumCopies; 1884 } 1885 } else if (Use->getOpcode() == ISD::BITCAST) { 1886 // f32 returned in a single GPR. 
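    // (Illustrative: the pattern being matched here is roughly
    // BITCAST -> CopyToReg -> RET_FLAG for a soft-float f32 return value.)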
1887 if (!Use->hasNUsesOfValue(1, 0)) 1888 return false; 1889 Use = *Use->use_begin(); 1890 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1891 return false; 1892 Copies[NumCopies++] = Use; 1893 } else { 1894 return false; 1895 } 1896 1897 if (NumCopies != 1 && NumCopies != 2) 1898 return false; 1899 1900 bool HasRet = false; 1901 for (unsigned i = 0; i < NumCopies; ++i) { 1902 SDNode *Copy = Copies[i]; 1903 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1904 UI != UE; ++UI) { 1905 if (UI->getOpcode() == ISD::CopyToReg) { 1906 SDNode *Use = *UI; 1907 if (Use == Copies[0] || Use == Copies[1]) 1908 continue; 1909 return false; 1910 } 1911 if (UI->getOpcode() != ARMISD::RET_FLAG) 1912 return false; 1913 HasRet = true; 1914 } 1915 } 1916 1917 return HasRet; 1918} 1919 1920bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1921 if (!EnableARMTailCalls) 1922 return false; 1923 1924 if (!CI->isTailCall()) 1925 return false; 1926 1927 return !Subtarget->isThumb1Only(); 1928} 1929 1930// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1931// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1932// one of the above mentioned nodes. It has to be wrapped because otherwise 1933// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1934// be used to form addressing mode. These wrapped nodes will be selected 1935// into MOVi. 1936static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1937 EVT PtrVT = Op.getValueType(); 1938 // FIXME there is no actual debug info here 1939 DebugLoc dl = Op.getDebugLoc(); 1940 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1941 SDValue Res; 1942 if (CP->isMachineConstantPoolEntry()) 1943 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1944 CP->getAlignment()); 1945 else 1946 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1947 CP->getAlignment()); 1948 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1949} 1950 1951unsigned ARMTargetLowering::getJumpTableEncoding() const { 1952 return MachineJumpTableInfo::EK_Inline; 1953} 1954 1955SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1956 SelectionDAG &DAG) const { 1957 MachineFunction &MF = DAG.getMachineFunction(); 1958 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1959 unsigned ARMPCLabelIndex = 0; 1960 DebugLoc DL = Op.getDebugLoc(); 1961 EVT PtrVT = getPointerTy(); 1962 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1963 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1964 SDValue CPAddr; 1965 if (RelocM == Reloc::Static) { 1966 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1967 } else { 1968 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1969 ARMPCLabelIndex = AFI->createPICLabelUId(); 1970 ARMConstantPoolValue *CPV = 1971 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 1972 ARMCP::CPBlockAddress, PCAdj); 1973 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1974 } 1975 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1976 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1977 MachinePointerInfo::getConstantPool(), 1978 false, false, false, 0); 1979 if (RelocM == Reloc::Static) 1980 return Result; 1981 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1982 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1983} 1984 1985// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1986SDValue 1987ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1988 SelectionDAG &DAG) const { 1989 DebugLoc dl = GA->getDebugLoc(); 1990 EVT PtrVT = getPointerTy(); 1991 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1992 MachineFunction &MF = DAG.getMachineFunction(); 1993 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1994 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1995 ARMConstantPoolValue *CPV = 1996 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 1997 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1998 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1999 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2000 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2001 MachinePointerInfo::getConstantPool(), 2002 false, false, false, 0); 2003 SDValue Chain = Argument.getValue(1); 2004 2005 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2006 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2007 2008 // call __tls_get_addr. 2009 ArgListTy Args; 2010 ArgListEntry Entry; 2011 Entry.Node = Argument; 2012 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2013 Args.push_back(Entry); 2014 // FIXME: is there useful debug info available here? 2015 std::pair<SDValue, SDValue> CallResult = 2016 LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()), 2017 false, false, false, false, 2018 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 2019 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2020 return CallResult.first; 2021} 2022 2023// Lower ISD::GlobalTLSAddress using the "initial exec" or 2024// "local exec" model. 2025SDValue 2026ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2027 SelectionDAG &DAG) const { 2028 const GlobalValue *GV = GA->getGlobal(); 2029 DebugLoc dl = GA->getDebugLoc(); 2030 SDValue Offset; 2031 SDValue Chain = DAG.getEntryNode(); 2032 EVT PtrVT = getPointerTy(); 2033 // Get the Thread Pointer 2034 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2035 2036 if (GV->isDeclaration()) { 2037 MachineFunction &MF = DAG.getMachineFunction(); 2038 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2039 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2040 // Initial exec model. 2041 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2042 ARMConstantPoolValue *CPV = 2043 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2044 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2045 true); 2046 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2047 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2048 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2049 MachinePointerInfo::getConstantPool(), 2050 false, false, false, 0); 2051 Chain = Offset.getValue(1); 2052 2053 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2054 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2055 2056 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2057 MachinePointerInfo::getConstantPool(), 2058 false, false, false, 0); 2059 } else { 2060 // local exec model 2061 ARMConstantPoolValue *CPV = 2062 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2063 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2064 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2065 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2066 MachinePointerInfo::getConstantPool(), 2067 false, false, false, 0); 2068 } 2069 2070 // The address of the thread local variable is the add of the thread 2071 // pointer with the offset of the variable. 2072 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2073} 2074 2075SDValue 2076ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2077 // TODO: implement the "local dynamic" model 2078 assert(Subtarget->isTargetELF() && 2079 "TLS not implemented for non-ELF targets"); 2080 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2081 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2082 // otherwise use the "Local Exec" TLS Model 2083 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2084 return LowerToTLSGeneralDynamicModel(GA, DAG); 2085 else 2086 return LowerToTLSExecModels(GA, DAG); 2087} 2088 2089SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2090 SelectionDAG &DAG) const { 2091 EVT PtrVT = getPointerTy(); 2092 DebugLoc dl = Op.getDebugLoc(); 2093 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2094 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2095 if (RelocM == Reloc::PIC_) { 2096 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2097 ARMConstantPoolValue *CPV = 2098 ARMConstantPoolConstant::Create(GV, 2099 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2100 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2101 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2102 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2103 CPAddr, 2104 MachinePointerInfo::getConstantPool(), 2105 false, false, false, 0); 2106 SDValue Chain = Result.getValue(1); 2107 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2108 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2109 if (!UseGOTOFF) 2110 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2111 MachinePointerInfo::getGOT(), 2112 false, false, false, 0); 2113 return Result; 2114 } 2115 2116 // If we have T2 ops, we can materialize the address directly via movt/movw 2117 // pair. This is always cheaper. 2118 if (Subtarget->useMovt()) { 2119 ++NumMovwMovt; 2120 // FIXME: Once remat is capable of dealing with instructions with register 2121 // operands, expand this into two nodes. 
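    // Roughly, the single wrapped TargetGlobalAddress returned below is later
    // selected into a movw/movt pair (illustrative):
    //   movw rN, :lower16:sym
    //   movt rN, :upper16:sym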
2122 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2123 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2124 } else { 2125 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2126 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2127 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2128 MachinePointerInfo::getConstantPool(), 2129 false, false, false, 0); 2130 } 2131} 2132 2133SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2134 SelectionDAG &DAG) const { 2135 EVT PtrVT = getPointerTy(); 2136 DebugLoc dl = Op.getDebugLoc(); 2137 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2138 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2139 MachineFunction &MF = DAG.getMachineFunction(); 2140 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2141 2142 // FIXME: Enable this for static codegen when tool issues are fixed. 2143 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2144 ++NumMovwMovt; 2145 // FIXME: Once remat is capable of dealing with instructions with register 2146 // operands, expand this into two nodes. 2147 if (RelocM == Reloc::Static) 2148 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2149 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2150 2151 unsigned Wrapper = (RelocM == Reloc::PIC_) 2152 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2153 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2154 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2155 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2156 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2157 MachinePointerInfo::getGOT(), 2158 false, false, false, 0); 2159 return Result; 2160 } 2161 2162 unsigned ARMPCLabelIndex = 0; 2163 SDValue CPAddr; 2164 if (RelocM == Reloc::Static) { 2165 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2166 } else { 2167 ARMPCLabelIndex = AFI->createPICLabelUId(); 2168 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2169 ARMConstantPoolValue *CPV = 2170 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2171 PCAdj); 2172 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2173 } 2174 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2175 2176 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2177 MachinePointerInfo::getConstantPool(), 2178 false, false, false, 0); 2179 SDValue Chain = Result.getValue(1); 2180 2181 if (RelocM == Reloc::PIC_) { 2182 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2183 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2184 } 2185 2186 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2187 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2188 false, false, false, 0); 2189 2190 return Result; 2191} 2192 2193SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2194 SelectionDAG &DAG) const { 2195 assert(Subtarget->isTargetELF() && 2196 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2197 MachineFunction &MF = DAG.getMachineFunction(); 2198 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2199 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2200 EVT PtrVT = getPointerTy(); 2201 DebugLoc dl = Op.getDebugLoc(); 2202 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2203 ARMConstantPoolValue *CPV = 2204 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2205 ARMPCLabelIndex, PCAdj); 2206 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2207 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2208 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2209 MachinePointerInfo::getConstantPool(), 2210 false, false, false, 0); 2211 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2212 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2213} 2214 2215SDValue 2216ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2217 const { 2218 DebugLoc dl = Op.getDebugLoc(); 2219 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2220 Op.getOperand(0), Op.getOperand(1)); 2221} 2222 2223SDValue 2224ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2225 DebugLoc dl = Op.getDebugLoc(); 2226 SDValue Val = DAG.getConstant(0, MVT::i32); 2227 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2228 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2229 Op.getOperand(1), Val); 2230} 2231 2232SDValue 2233ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2234 DebugLoc dl = Op.getDebugLoc(); 2235 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2236 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2237} 2238 2239SDValue 2240ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2241 const ARMSubtarget *Subtarget) const { 2242 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2243 DebugLoc dl = Op.getDebugLoc(); 2244 switch (IntNo) { 2245 default: return SDValue(); // Don't custom lower most intrinsics. 2246 case Intrinsic::arm_thread_pointer: { 2247 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2248 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2249 } 2250 case Intrinsic::eh_sjlj_lsda: { 2251 MachineFunction &MF = DAG.getMachineFunction(); 2252 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2253 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2254 EVT PtrVT = getPointerTy(); 2255 DebugLoc dl = Op.getDebugLoc(); 2256 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2257 SDValue CPAddr; 2258 unsigned PCAdj = (RelocM != Reloc::PIC_) 2259 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2260 ARMConstantPoolValue *CPV = 2261 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2262 ARMCP::CPLSDA, PCAdj); 2263 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2264 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2265 SDValue Result = 2266 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2267 MachinePointerInfo::getConstantPool(), 2268 false, false, false, 0); 2269 2270 if (RelocM == Reloc::PIC_) { 2271 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2272 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2273 } 2274 return Result; 2275 } 2276 case Intrinsic::arm_neon_vmulls: 2277 case Intrinsic::arm_neon_vmullu: { 2278 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2279 ? 
ARMISD::VMULLs : ARMISD::VMULLu; 2280 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2281 Op.getOperand(1), Op.getOperand(2)); 2282 } 2283 } 2284} 2285 2286static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2287 const ARMSubtarget *Subtarget) { 2288 DebugLoc dl = Op.getDebugLoc(); 2289 if (!Subtarget->hasDataBarrier()) { 2290 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2291 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2292 // here. 2293 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2294 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2295 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2296 DAG.getConstant(0, MVT::i32)); 2297 } 2298 2299 SDValue Op5 = Op.getOperand(5); 2300 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2301 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2302 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2303 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2304 2305 ARM_MB::MemBOpt DMBOpt; 2306 if (isDeviceBarrier) 2307 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2308 else 2309 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2310 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2311 DAG.getConstant(DMBOpt, MVT::i32)); 2312} 2313 2314 2315static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2316 const ARMSubtarget *Subtarget) { 2317 // FIXME: handle "fence singlethread" more efficiently. 2318 DebugLoc dl = Op.getDebugLoc(); 2319 if (!Subtarget->hasDataBarrier()) { 2320 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2321 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2322 // here. 2323 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2324 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2325 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2326 DAG.getConstant(0, MVT::i32)); 2327 } 2328 2329 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2330 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2331} 2332 2333static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2334 const ARMSubtarget *Subtarget) { 2335 // ARM pre v5TE and Thumb1 does not have preload instructions. 2336 if (!(Subtarget->isThumb2() || 2337 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2338 // Just preserve the chain. 2339 return Op.getOperand(0); 2340 2341 DebugLoc dl = Op.getDebugLoc(); 2342 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2343 if (!isRead && 2344 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2345 // ARMv7 with MP extension has PLDW. 2346 return Op.getOperand(0); 2347 2348 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2349 if (Subtarget->isThumb()) { 2350 // Invert the bits. 2351 isRead = ~isRead & 1; 2352 isData = ~isData & 1; 2353 } 2354 2355 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2356 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2357 DAG.getConstant(isData, MVT::i32)); 2358} 2359 2360static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2361 MachineFunction &MF = DAG.getMachineFunction(); 2362 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2363 2364 // vastart just stores the address of the VarArgsFrameIndex slot into the 2365 // memory location argument. 
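  // Illustrative: for
  //   call void @llvm.va_start(i8* %ap)
  // this lowers to a single store of that frame index's address into %ap.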
2366 DebugLoc dl = Op.getDebugLoc(); 2367 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2368 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2369 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2370 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2371 MachinePointerInfo(SV), false, false, 0); 2372} 2373 2374SDValue 2375ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2376 SDValue &Root, SelectionDAG &DAG, 2377 DebugLoc dl) const { 2378 MachineFunction &MF = DAG.getMachineFunction(); 2379 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2380 2381 TargetRegisterClass *RC; 2382 if (AFI->isThumb1OnlyFunction()) 2383 RC = ARM::tGPRRegisterClass; 2384 else 2385 RC = ARM::GPRRegisterClass; 2386 2387 // Transform the arguments stored in physical registers into virtual ones. 2388 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2389 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2390 2391 SDValue ArgValue2; 2392 if (NextVA.isMemLoc()) { 2393 MachineFrameInfo *MFI = MF.getFrameInfo(); 2394 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2395 2396 // Create load node to retrieve arguments from the stack. 2397 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2398 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2399 MachinePointerInfo::getFixedStack(FI), 2400 false, false, false, 0); 2401 } else { 2402 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2403 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2404 } 2405 2406 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2407} 2408 2409void 2410ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, 2411 unsigned &VARegSize, unsigned &VARegSaveSize) 2412 const { 2413 unsigned NumGPRs; 2414 if (CCInfo.isFirstByValRegValid()) 2415 NumGPRs = ARM::R4 - CCInfo.getFirstByValReg(); 2416 else { 2417 unsigned int firstUnalloced; 2418 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs, 2419 sizeof(GPRArgRegs) / 2420 sizeof(GPRArgRegs[0])); 2421 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0; 2422 } 2423 2424 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2425 VARegSize = NumGPRs * 4; 2426 VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2427} 2428 2429// The remaining GPRs hold either the beginning of variable-argument 2430// data, or the beginning of an aggregate passed by value (usuall 2431// byval). Either way, we allocate stack slots adjacent to the data 2432// provided by our caller, and store the unallocated registers there. 2433// If this is a variadic function, the va_list pointer will begin with 2434// these values; otherwise, this reassembles a (byval) structure that 2435// was split between registers and memory. 
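// Illustrative example: in 'int f(int a, ...)' only r0 carries a named
// argument, so r1-r3 are stored here into the VARegSaveSize-byte fixed
// object, placed so that register-passed and stack-passed variadic words
// end up contiguous; VarArgsFrameIndex then gives va_start its address.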
2436void 2437ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2438 DebugLoc dl, SDValue &Chain, 2439 unsigned ArgOffset) const { 2440 MachineFunction &MF = DAG.getMachineFunction(); 2441 MachineFrameInfo *MFI = MF.getFrameInfo(); 2442 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2443 unsigned firstRegToSaveIndex; 2444 if (CCInfo.isFirstByValRegValid()) 2445 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2446 else { 2447 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2448 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2449 } 2450 2451 unsigned VARegSize, VARegSaveSize; 2452 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2453 if (VARegSaveSize) { 2454 // If this function is vararg, store any remaining integer argument regs 2455 // to their spots on the stack so that they may be loaded by deferencing 2456 // the result of va_next. 2457 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2458 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2459 ArgOffset + VARegSaveSize 2460 - VARegSize, 2461 false)); 2462 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2463 getPointerTy()); 2464 2465 SmallVector<SDValue, 4> MemOps; 2466 for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { 2467 TargetRegisterClass *RC; 2468 if (AFI->isThumb1OnlyFunction()) 2469 RC = ARM::tGPRRegisterClass; 2470 else 2471 RC = ARM::GPRRegisterClass; 2472 2473 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2474 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2475 SDValue Store = 2476 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2477 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2478 false, false, 0); 2479 MemOps.push_back(Store); 2480 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2481 DAG.getConstant(4, getPointerTy())); 2482 } 2483 if (!MemOps.empty()) 2484 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2485 &MemOps[0], MemOps.size()); 2486 } else 2487 // This will point to the next argument passed via stack. 2488 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2489} 2490 2491SDValue 2492ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2493 CallingConv::ID CallConv, bool isVarArg, 2494 const SmallVectorImpl<ISD::InputArg> 2495 &Ins, 2496 DebugLoc dl, SelectionDAG &DAG, 2497 SmallVectorImpl<SDValue> &InVals) 2498 const { 2499 MachineFunction &MF = DAG.getMachineFunction(); 2500 MachineFrameInfo *MFI = MF.getFrameInfo(); 2501 2502 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2503 2504 // Assign locations to all of the incoming arguments. 2505 SmallVector<CCValAssign, 16> ArgLocs; 2506 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2507 getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue); 2508 CCInfo.AnalyzeFormalArguments(Ins, 2509 CCAssignFnForNode(CallConv, /* Return*/ false, 2510 isVarArg)); 2511 2512 SmallVector<SDValue, 16> ArgValues; 2513 int lastInsIndex = -1; 2514 2515 SDValue ArgValue; 2516 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2517 CCValAssign &VA = ArgLocs[i]; 2518 2519 // Arguments stored in registers. 2520 if (VA.isRegLoc()) { 2521 EVT RegVT = VA.getLocVT(); 2522 2523 if (VA.needsCustom()) { 2524 // f64 and vector types are split up into multiple registers or 2525 // combinations of registers and stack slots. 
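        // (Illustrative: an incoming f64 arrives as two i32 halves - either
        // two GPRs or a GPR plus a stack slot - and is reassembled with
        // ARMISD::VMOVDRR in GetF64FormalArgument; a v2f64 argument repeats
        // that for each of its two f64 lanes before they are inserted back
        // into a vector value.)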
2526 if (VA.getLocVT() == MVT::v2f64) { 2527 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2528 Chain, DAG, dl); 2529 VA = ArgLocs[++i]; // skip ahead to next loc 2530 SDValue ArgValue2; 2531 if (VA.isMemLoc()) { 2532 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2533 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2534 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2535 MachinePointerInfo::getFixedStack(FI), 2536 false, false, false, 0); 2537 } else { 2538 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2539 Chain, DAG, dl); 2540 } 2541 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2542 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2543 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2544 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2545 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2546 } else 2547 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2548 2549 } else { 2550 TargetRegisterClass *RC; 2551 2552 if (RegVT == MVT::f32) 2553 RC = ARM::SPRRegisterClass; 2554 else if (RegVT == MVT::f64) 2555 RC = ARM::DPRRegisterClass; 2556 else if (RegVT == MVT::v2f64) 2557 RC = ARM::QPRRegisterClass; 2558 else if (RegVT == MVT::i32) 2559 RC = (AFI->isThumb1OnlyFunction() ? 2560 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2561 else 2562 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2563 2564 // Transform the arguments in physical registers into virtual ones. 2565 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2566 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2567 } 2568 2569 // If this is an 8 or 16-bit value, it is really passed promoted 2570 // to 32 bits. Insert an assert[sz]ext to capture this, then 2571 // truncate to the right size. 2572 switch (VA.getLocInfo()) { 2573 default: llvm_unreachable("Unknown loc info!"); 2574 case CCValAssign::Full: break; 2575 case CCValAssign::BCvt: 2576 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2577 break; 2578 case CCValAssign::SExt: 2579 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2580 DAG.getValueType(VA.getValVT())); 2581 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2582 break; 2583 case CCValAssign::ZExt: 2584 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2585 DAG.getValueType(VA.getValVT())); 2586 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2587 break; 2588 } 2589 2590 InVals.push_back(ArgValue); 2591 2592 } else { // VA.isRegLoc() 2593 2594 // sanity check 2595 assert(VA.isMemLoc()); 2596 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2597 2598 int index = ArgLocs[i].getValNo(); 2599 2600 // Some Ins[] entries become multiple ArgLoc[] entries. 2601 // Process them only once. 2602 if (index != lastInsIndex) 2603 { 2604 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2605 // FIXME: For now, all byval parameter objects are marked mutable. 2606 // This can be changed with more analysis. 2607 // In case of tail call optimization mark all arguments mutable. 2608 // Since they could be overwritten by lowering of arguments in case of 2609 // a tail call. 2610 if (Flags.isByVal()) { 2611 unsigned VARegSize, VARegSaveSize; 2612 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2613 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2614 unsigned Bytes = Flags.getByValSize() - VARegSize; 2615 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2616 int FI = MFI->CreateFixedObject(Bytes, 2617 VA.getLocMemOffset(), false); 2618 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2619 } else { 2620 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2621 VA.getLocMemOffset(), true); 2622 2623 // Create load nodes to retrieve arguments from the stack. 2624 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2625 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2626 MachinePointerInfo::getFixedStack(FI), 2627 false, false, false, 0)); 2628 } 2629 lastInsIndex = index; 2630 } 2631 } 2632 } 2633 2634 // varargs 2635 if (isVarArg) 2636 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2637 2638 return Chain; 2639} 2640 2641/// isFloatingPointZero - Return true if this is +0.0. 2642static bool isFloatingPointZero(SDValue Op) { 2643 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2644 return CFP->getValueAPF().isPosZero(); 2645 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2646 // Maybe this has already been legalized into the constant pool? 2647 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2648 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2649 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2650 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2651 return CFP->getValueAPF().isPosZero(); 2652 } 2653 } 2654 return false; 2655} 2656 2657/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2658/// the given operands. 2659SDValue 2660ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2661 SDValue &ARMcc, SelectionDAG &DAG, 2662 DebugLoc dl) const { 2663 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2664 unsigned C = RHSC->getZExtValue(); 2665 if (!isLegalICmpImmediate(C)) { 2666 // Constant does not fit, try adjusting it by one? 2667 switch (CC) { 2668 default: break; 2669 case ISD::SETLT: 2670 case ISD::SETGE: 2671 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2672 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2673 RHS = DAG.getConstant(C-1, MVT::i32); 2674 } 2675 break; 2676 case ISD::SETULT: 2677 case ISD::SETUGE: 2678 if (C != 0 && isLegalICmpImmediate(C-1)) { 2679 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2680 RHS = DAG.getConstant(C-1, MVT::i32); 2681 } 2682 break; 2683 case ISD::SETLE: 2684 case ISD::SETGT: 2685 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2686 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2687 RHS = DAG.getConstant(C+1, MVT::i32); 2688 } 2689 break; 2690 case ISD::SETULE: 2691 case ISD::SETUGT: 2692 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2693 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2694 RHS = DAG.getConstant(C+1, MVT::i32); 2695 } 2696 break; 2697 } 2698 } 2699 } 2700 2701 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2702 ARMISD::NodeType CompareType; 2703 switch (CondCode) { 2704 default: 2705 CompareType = ARMISD::CMP; 2706 break; 2707 case ARMCC::EQ: 2708 case ARMCC::NE: 2709 // Uses only Z Flag 2710 CompareType = ARMISD::CMPZ; 2711 break; 2712 } 2713 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2714 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2715} 2716 2717/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
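/// (Illustrative: ARMISD::CMPFP compares the two operands and ARMISD::CMPFPw0
/// compares against +0.0; the trailing ARMISD::FMSTAT transfers the VFP
/// status flags into the integer condition flags so that predicated integer
/// instructions can test the result.)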
2718SDValue 2719ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2720 DebugLoc dl) const { 2721 SDValue Cmp; 2722 if (!isFloatingPointZero(RHS)) 2723 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2724 else 2725 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2726 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2727} 2728 2729/// duplicateCmp - Glue values can have only one use, so this function 2730/// duplicates a comparison node. 2731SDValue 2732ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2733 unsigned Opc = Cmp.getOpcode(); 2734 DebugLoc DL = Cmp.getDebugLoc(); 2735 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2736 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2737 2738 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2739 Cmp = Cmp.getOperand(0); 2740 Opc = Cmp.getOpcode(); 2741 if (Opc == ARMISD::CMPFP) 2742 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2743 else { 2744 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2745 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2746 } 2747 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2748} 2749 2750SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2751 SDValue Cond = Op.getOperand(0); 2752 SDValue SelectTrue = Op.getOperand(1); 2753 SDValue SelectFalse = Op.getOperand(2); 2754 DebugLoc dl = Op.getDebugLoc(); 2755 2756 // Convert: 2757 // 2758 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2759 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2760 // 2761 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2762 const ConstantSDNode *CMOVTrue = 2763 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2764 const ConstantSDNode *CMOVFalse = 2765 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2766 2767 if (CMOVTrue && CMOVFalse) { 2768 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2769 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2770 2771 SDValue True; 2772 SDValue False; 2773 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2774 True = SelectTrue; 2775 False = SelectFalse; 2776 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2777 True = SelectFalse; 2778 False = SelectTrue; 2779 } 2780 2781 if (True.getNode() && False.getNode()) { 2782 EVT VT = Op.getValueType(); 2783 SDValue ARMcc = Cond.getOperand(2); 2784 SDValue CCR = Cond.getOperand(3); 2785 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2786 assert(True.getValueType() == VT); 2787 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2788 } 2789 } 2790 } 2791 2792 return DAG.getSelectCC(dl, Cond, 2793 DAG.getConstant(0, Cond.getValueType()), 2794 SelectTrue, SelectFalse, ISD::SETNE); 2795} 2796 2797SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2798 EVT VT = Op.getValueType(); 2799 SDValue LHS = Op.getOperand(0); 2800 SDValue RHS = Op.getOperand(1); 2801 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2802 SDValue TrueVal = Op.getOperand(2); 2803 SDValue FalseVal = Op.getOperand(3); 2804 DebugLoc dl = Op.getDebugLoc(); 2805 2806 if (LHS.getValueType() == MVT::i32) { 2807 SDValue ARMcc; 2808 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2809 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2810 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2811 } 2812 2813 ARMCC::CondCodes CondCode, CondCode2; 2814 FPCCToARMCC(CC, 
CondCode, CondCode2); 2815 2816 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2817 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2818 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2819 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2820 ARMcc, CCR, Cmp); 2821 if (CondCode2 != ARMCC::AL) { 2822 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2823 // FIXME: Needs another CMP because flag can have but one use. 2824 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2825 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2826 Result, TrueVal, ARMcc2, CCR, Cmp2); 2827 } 2828 return Result; 2829} 2830 2831/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2832/// to morph to an integer compare sequence. 2833static bool canChangeToInt(SDValue Op, bool &SeenZero, 2834 const ARMSubtarget *Subtarget) { 2835 SDNode *N = Op.getNode(); 2836 if (!N->hasOneUse()) 2837 // Otherwise it requires moving the value from fp to integer registers. 2838 return false; 2839 if (!N->getNumValues()) 2840 return false; 2841 EVT VT = Op.getValueType(); 2842 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2843 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2844 // vmrs are very slow, e.g. cortex-a8. 2845 return false; 2846 2847 if (isFloatingPointZero(Op)) { 2848 SeenZero = true; 2849 return true; 2850 } 2851 return ISD::isNormalLoad(N); 2852} 2853 2854static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2855 if (isFloatingPointZero(Op)) 2856 return DAG.getConstant(0, MVT::i32); 2857 2858 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2859 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2860 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2861 Ld->isVolatile(), Ld->isNonTemporal(), 2862 Ld->isInvariant(), Ld->getAlignment()); 2863 2864 llvm_unreachable("Unknown VFP cmp argument!"); 2865} 2866 2867static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2868 SDValue &RetVal1, SDValue &RetVal2) { 2869 if (isFloatingPointZero(Op)) { 2870 RetVal1 = DAG.getConstant(0, MVT::i32); 2871 RetVal2 = DAG.getConstant(0, MVT::i32); 2872 return; 2873 } 2874 2875 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2876 SDValue Ptr = Ld->getBasePtr(); 2877 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2878 Ld->getChain(), Ptr, 2879 Ld->getPointerInfo(), 2880 Ld->isVolatile(), Ld->isNonTemporal(), 2881 Ld->isInvariant(), Ld->getAlignment()); 2882 2883 EVT PtrType = Ptr.getValueType(); 2884 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2885 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2886 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2887 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2888 Ld->getChain(), NewPtr, 2889 Ld->getPointerInfo().getWithOffset(4), 2890 Ld->isVolatile(), Ld->isNonTemporal(), 2891 Ld->isInvariant(), NewAlign); 2892 return; 2893 } 2894 2895 llvm_unreachable("Unknown VFP cmp argument!"); 2896} 2897 2898/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2899/// f32 and even f64 comparisons to integer ones. 
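/// For example (illustrative), an EQ/NE branch on two f32 values that are
/// known not to be NaN (or where one operand is +0.0) can be lowered as an
/// integer CMP of the operands' bit patterns, avoiding the vcmpe + vmrs
/// sequence; f64 operands are split into two i32 words and lowered to an
/// ARMISD::BCC_i64 node instead.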
2900SDValue 2901ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2902 SDValue Chain = Op.getOperand(0); 2903 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2904 SDValue LHS = Op.getOperand(2); 2905 SDValue RHS = Op.getOperand(3); 2906 SDValue Dest = Op.getOperand(4); 2907 DebugLoc dl = Op.getDebugLoc(); 2908 2909 bool SeenZero = false; 2910 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2911 canChangeToInt(RHS, SeenZero, Subtarget) && 2912 // If one of the operands is zero, it's safe to ignore the NaN case since 2913 // we only care about equality comparisons. 2914 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2915 // If unsafe fp math optimization is enabled and there are no other uses of 2916 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2917 // to an integer comparison. 2918 if (CC == ISD::SETOEQ) 2919 CC = ISD::SETEQ; 2920 else if (CC == ISD::SETUNE) 2921 CC = ISD::SETNE; 2922 2923 SDValue ARMcc; 2924 if (LHS.getValueType() == MVT::f32) { 2925 LHS = bitcastf32Toi32(LHS, DAG); 2926 RHS = bitcastf32Toi32(RHS, DAG); 2927 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2928 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2929 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2930 Chain, Dest, ARMcc, CCR, Cmp); 2931 } 2932 2933 SDValue LHS1, LHS2; 2934 SDValue RHS1, RHS2; 2935 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2936 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2937 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2938 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2939 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2940 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2941 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2942 } 2943 2944 return SDValue(); 2945} 2946 2947SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2948 SDValue Chain = Op.getOperand(0); 2949 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2950 SDValue LHS = Op.getOperand(2); 2951 SDValue RHS = Op.getOperand(3); 2952 SDValue Dest = Op.getOperand(4); 2953 DebugLoc dl = Op.getDebugLoc(); 2954 2955 if (LHS.getValueType() == MVT::i32) { 2956 SDValue ARMcc; 2957 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2958 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2959 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2960 Chain, Dest, ARMcc, CCR, Cmp); 2961 } 2962 2963 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2964 2965 if (UnsafeFPMath && 2966 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2967 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2968 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2969 if (Result.getNode()) 2970 return Result; 2971 } 2972 2973 ARMCC::CondCodes CondCode, CondCode2; 2974 FPCCToARMCC(CC, CondCode, CondCode2); 2975 2976 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2977 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2978 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2979 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2980 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2981 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2982 if (CondCode2 != ARMCC::AL) { 2983 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2984 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2985 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2986 } 2987 return Res; 2988} 2989 2990SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2991 SDValue
Chain = Op.getOperand(0); 2992 SDValue Table = Op.getOperand(1); 2993 SDValue Index = Op.getOperand(2); 2994 DebugLoc dl = Op.getDebugLoc(); 2995 2996 EVT PTy = getPointerTy(); 2997 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2998 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2999 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 3000 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3001 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 3002 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 3003 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3004 if (Subtarget->isThumb2()) { 3005 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 3006 // which does another jump to the destination. This also makes it easier 3007 // to translate it to TBB / TBH later. 3008 // FIXME: This might not work if the function is extremely large. 3009 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3010 Addr, Op.getOperand(2), JTI, UId); 3011 } 3012 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3013 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3014 MachinePointerInfo::getJumpTable(), 3015 false, false, false, 0); 3016 Chain = Addr.getValue(1); 3017 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3018 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3019 } else { 3020 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3021 MachinePointerInfo::getJumpTable(), 3022 false, false, false, 0); 3023 Chain = Addr.getValue(1); 3024 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3025 } 3026} 3027 3028static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3029 EVT VT = Op.getValueType(); 3030 assert(VT.getVectorElementType() == MVT::i32 && "Unexpected custom lowering"); 3031 3032 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 3033 return Op; 3034 return DAG.UnrollVectorOp(Op.getNode()); 3035} 3036 3037static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3038 EVT VT = Op.getValueType(); 3039 if (VT.isVector()) 3040 return LowerVectorFP_TO_INT(Op, DAG); 3041 3042 DebugLoc dl = Op.getDebugLoc(); 3043 unsigned Opc; 3044 3045 switch (Op.getOpcode()) { 3046 default: 3047 assert(0 && "Invalid opcode!"); 3048 case ISD::FP_TO_SINT: 3049 Opc = ARMISD::FTOSI; 3050 break; 3051 case ISD::FP_TO_UINT: 3052 Opc = ARMISD::FTOUI; 3053 break; 3054 } 3055 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3056 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3057} 3058 3059static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3060 EVT VT = Op.getValueType(); 3061 DebugLoc dl = Op.getDebugLoc(); 3062 3063 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 3064 if (VT.getVectorElementType() == MVT::f32) 3065 return Op; 3066 return DAG.UnrollVectorOp(Op.getNode()); 3067 } 3068 3069 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3070 "Invalid type for custom lowering!"); 3071 if (VT != MVT::v4f32) 3072 return DAG.UnrollVectorOp(Op.getNode()); 3073 3074 unsigned CastOpc; 3075 unsigned Opc; 3076 switch (Op.getOpcode()) { 3077 default: 3078 assert(0 && "Invalid opcode!"); 3079 case ISD::SINT_TO_FP: 3080 CastOpc = ISD::SIGN_EXTEND; 3081 Opc = ISD::SINT_TO_FP; 3082 break; 3083 case ISD::UINT_TO_FP: 3084 CastOpc = ISD::ZERO_EXTEND; 3085 Opc = ISD::UINT_TO_FP; 3086 break; 3087 } 3088 3089 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3090 
return DAG.getNode(Opc, dl, VT, Op); 3091} 3092 3093static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3094 EVT VT = Op.getValueType(); 3095 if (VT.isVector()) 3096 return LowerVectorINT_TO_FP(Op, DAG); 3097 3098 DebugLoc dl = Op.getDebugLoc(); 3099 unsigned Opc; 3100 3101 switch (Op.getOpcode()) { 3102 default: 3103 assert(0 && "Invalid opcode!"); 3104 case ISD::SINT_TO_FP: 3105 Opc = ARMISD::SITOF; 3106 break; 3107 case ISD::UINT_TO_FP: 3108 Opc = ARMISD::UITOF; 3109 break; 3110 } 3111 3112 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3113 return DAG.getNode(Opc, dl, VT, Op); 3114} 3115 3116SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3117 // Implement fcopysign with a fabs and a conditional fneg. 3118 SDValue Tmp0 = Op.getOperand(0); 3119 SDValue Tmp1 = Op.getOperand(1); 3120 DebugLoc dl = Op.getDebugLoc(); 3121 EVT VT = Op.getValueType(); 3122 EVT SrcVT = Tmp1.getValueType(); 3123 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3124 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3125 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3126 3127 if (UseNEON) { 3128 // Use VBSL to copy the sign bit. 3129 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3130 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3131 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3132 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3133 if (VT == MVT::f64) 3134 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3135 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3136 DAG.getConstant(32, MVT::i32)); 3137 else /*if (VT == MVT::f32)*/ 3138 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3139 if (SrcVT == MVT::f32) { 3140 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3141 if (VT == MVT::f64) 3142 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3143 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3144 DAG.getConstant(32, MVT::i32)); 3145 } else if (VT == MVT::f32) 3146 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3147 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3148 DAG.getConstant(32, MVT::i32)); 3149 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3150 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3151 3152 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3153 MVT::i32); 3154 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3155 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3156 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3157 3158 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3159 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3160 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3161 if (VT == MVT::f32) { 3162 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3163 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3164 DAG.getConstant(0, MVT::i32)); 3165 } else { 3166 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3167 } 3168 3169 return Res; 3170 } 3171 3172 // Bitcast operand 1 to i32. 3173 if (SrcVT == MVT::f64) 3174 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3175 &Tmp1, 1).getValue(1); 3176 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3177 3178 // Or in the signbit with integer operations. 
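  // That is: result = (bits(Tmp0) & 0x7fffffff) | (bits(Tmp1) & 0x80000000),
  // keeping the magnitude of operand 0 and the sign of operand 1. For f64 only
  // the high word, which holds the sign bit, needs the OR.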
3179 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3180 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3181 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3182 if (VT == MVT::f32) { 3183 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3184 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3185 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3186 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3187 } 3188 3189 // f64: Or the high part with signbit and then combine two parts. 3190 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3191 &Tmp0, 1); 3192 SDValue Lo = Tmp0.getValue(0); 3193 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3194 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3195 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3196} 3197 3198SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3199 MachineFunction &MF = DAG.getMachineFunction(); 3200 MachineFrameInfo *MFI = MF.getFrameInfo(); 3201 MFI->setReturnAddressIsTaken(true); 3202 3203 EVT VT = Op.getValueType(); 3204 DebugLoc dl = Op.getDebugLoc(); 3205 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3206 if (Depth) { 3207 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3208 SDValue Offset = DAG.getConstant(4, MVT::i32); 3209 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3210 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3211 MachinePointerInfo(), false, false, false, 0); 3212 } 3213 3214 // Return LR, which contains the return address. Mark it an implicit live-in. 3215 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3216 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3217} 3218 3219SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3220 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3221 MFI->setFrameAddressIsTaken(true); 3222 3223 EVT VT = Op.getValueType(); 3224 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3225 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3226 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3227 ? ARM::R7 : ARM::R11; 3228 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3229 while (Depth--) 3230 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3231 MachinePointerInfo(), 3232 false, false, false, 0); 3233 return FrameAddr; 3234} 3235 3236/// ExpandBITCAST - If the target supports VFP, this function is called to 3237/// expand a bit convert where either the source or destination type is i64 to 3238/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3239/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3240/// vectors), since the legalizer won't know what to do with that. 3241static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3242 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3243 DebugLoc dl = N->getDebugLoc(); 3244 SDValue Op = N->getOperand(0); 3245 3246 // This function is only supposed to be called for i64 types, either as the 3247 // source or destination of the bit convert. 3248 EVT SrcVT = Op.getValueType(); 3249 EVT DstVT = N->getValueType(0); 3250 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3251 "ExpandBITCAST called for non-i64 type"); 3252 3253 // Turn i64->f64 into VMOVDRR. 
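  // The i64 is split into its low and high i32 halves with EXTRACT_ELEMENT and
  // the halves are moved into one D register with a single VMOVDRR; the
  // opposite direction below uses VMOVRRD and reassembles the i64 with
  // BUILD_PAIR.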
3254 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3255 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3256 DAG.getConstant(0, MVT::i32)); 3257 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3258 DAG.getConstant(1, MVT::i32)); 3259 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3260 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3261 } 3262 3263 // Turn f64->i64 into VMOVRRD. 3264 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3265 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3266 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3267 // Merge the pieces into a single i64 value. 3268 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3269 } 3270 3271 return SDValue(); 3272} 3273 3274/// getZeroVector - Returns a vector of specified type with all zero elements. 3275/// Zero vectors are used to represent vector negation and in those cases 3276/// will be implemented with the NEON VNEG instruction. However, VNEG does 3277/// not support i64 elements, so sometimes the zero vectors will need to be 3278/// explicitly constructed. Regardless, use a canonical VMOV to create the 3279/// zero vector. 3280static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3281 assert(VT.isVector() && "Expected a vector type"); 3282 // The canonical modified immediate encoding of a zero vector is....0! 3283 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3284 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3285 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3286 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3287} 3288 3289/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3290/// i32 values and takes a 2 x i32 value to shift plus a shift amount. 3291SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3292 SelectionDAG &DAG) const { 3293 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3294 EVT VT = Op.getValueType(); 3295 unsigned VTBits = VT.getSizeInBits(); 3296 DebugLoc dl = Op.getDebugLoc(); 3297 SDValue ShOpLo = Op.getOperand(0); 3298 SDValue ShOpHi = Op.getOperand(1); 3299 SDValue ShAmt = Op.getOperand(2); 3300 SDValue ARMcc; 3301 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3302 3303 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3304 3305 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3306 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3307 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3308 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3309 DAG.getConstant(VTBits, MVT::i32)); 3310 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3311 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3312 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3313 3314 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3315 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3316 ARMcc, DAG, dl); 3317 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3318 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3319 CCR, Cmp); 3320 3321 SDValue Ops[2] = { Lo, Hi }; 3322 return DAG.getMergeValues(Ops, 2, dl); 3323} 3324 3325/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3326/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
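/// Roughly (illustrative): Lo = ShOpLo << ShAmt, and Hi is chosen with a CMOV:
/// (ShOpHi << ShAmt) | (ShOpLo >> (VTBits - ShAmt)) when ShAmt < VTBits, or
/// ShOpLo << (ShAmt - VTBits) otherwise, based on a CMP of (ShAmt - VTBits)
/// against zero.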
3327SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3328 SelectionDAG &DAG) const { 3329 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3330 EVT VT = Op.getValueType(); 3331 unsigned VTBits = VT.getSizeInBits(); 3332 DebugLoc dl = Op.getDebugLoc(); 3333 SDValue ShOpLo = Op.getOperand(0); 3334 SDValue ShOpHi = Op.getOperand(1); 3335 SDValue ShAmt = Op.getOperand(2); 3336 SDValue ARMcc; 3337 3338 assert(Op.getOpcode() == ISD::SHL_PARTS); 3339 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3340 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3341 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3342 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3343 DAG.getConstant(VTBits, MVT::i32)); 3344 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3345 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3346 3347 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3348 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3349 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3350 ARMcc, DAG, dl); 3351 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3352 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3353 CCR, Cmp); 3354 3355 SDValue Ops[2] = { Lo, Hi }; 3356 return DAG.getMergeValues(Ops, 2, dl); 3357} 3358 3359SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3360 SelectionDAG &DAG) const { 3361 // The rounding mode is in bits 23:22 of the FPSCR. 3362 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3363 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3364 // so that the shift + and get folded into a bitfield extract. 3365 DebugLoc dl = Op.getDebugLoc(); 3366 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3367 DAG.getConstant(Intrinsic::arm_get_fpscr, 3368 MVT::i32)); 3369 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3370 DAG.getConstant(1U << 22, MVT::i32)); 3371 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3372 DAG.getConstant(22, MVT::i32)); 3373 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3374 DAG.getConstant(3, MVT::i32)); 3375} 3376 3377static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3378 const ARMSubtarget *ST) { 3379 EVT VT = N->getValueType(0); 3380 DebugLoc dl = N->getDebugLoc(); 3381 3382 if (!ST->hasV6T2Ops()) 3383 return SDValue(); 3384 3385 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3386 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3387} 3388 3389static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3390 const ARMSubtarget *ST) { 3391 EVT VT = N->getValueType(0); 3392 DebugLoc dl = N->getDebugLoc(); 3393 3394 if (!VT.isVector()) 3395 return SDValue(); 3396 3397 // Lower vector shifts on NEON to use VSHL. 3398 assert(ST->hasNEON() && "unexpected vector shift"); 3399 3400 // Left shifts translate directly to the vshiftu intrinsic. 3401 if (N->getOpcode() == ISD::SHL) 3402 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3403 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3404 N->getOperand(0), N->getOperand(1)); 3405 3406 assert((N->getOpcode() == ISD::SRA || 3407 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3408 3409 // NEON uses the same intrinsics for both left and right shifts. For 3410 // right shifts, the shift amounts are negative, so negate the vector of 3411 // shift amounts. 
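  // For example (illustrative), an SRL of a v4i32 by <1, 2, 3, 4> becomes a
  // vshiftu intrinsic call with shift amounts <-1, -2, -3, -4>, computed below
  // as a SUB from the zero vector.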
3412 EVT ShiftVT = N->getOperand(1).getValueType(); 3413 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3414 getZeroVector(ShiftVT, DAG, dl), 3415 N->getOperand(1)); 3416 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3417 Intrinsic::arm_neon_vshifts : 3418 Intrinsic::arm_neon_vshiftu); 3419 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3420 DAG.getConstant(vshiftInt, MVT::i32), 3421 N->getOperand(0), NegatedCount); 3422} 3423 3424static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3425 const ARMSubtarget *ST) { 3426 EVT VT = N->getValueType(0); 3427 DebugLoc dl = N->getDebugLoc(); 3428 3429 // We can get here for a node like i32 = ISD::SHL i32, i64 3430 if (VT != MVT::i64) 3431 return SDValue(); 3432 3433 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3434 "Unknown shift to lower!"); 3435 3436 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3437 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3438 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3439 return SDValue(); 3440 3441 // If we are in thumb mode, we don't have RRX. 3442 if (ST->isThumb1Only()) return SDValue(); 3443 3444 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3445 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3446 DAG.getConstant(0, MVT::i32)); 3447 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3448 DAG.getConstant(1, MVT::i32)); 3449 3450 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3451 // captures the result into a carry flag. 3452 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3453 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3454 3455 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3456 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3457 3458 // Merge the pieces into a single i64 value. 3459 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3460} 3461 3462static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3463 SDValue TmpOp0, TmpOp1; 3464 bool Invert = false; 3465 bool Swap = false; 3466 unsigned Opc = 0; 3467 3468 SDValue Op0 = Op.getOperand(0); 3469 SDValue Op1 = Op.getOperand(1); 3470 SDValue CC = Op.getOperand(2); 3471 EVT VT = Op.getValueType(); 3472 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3473 DebugLoc dl = Op.getDebugLoc(); 3474 3475 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3476 switch (SetCCOpcode) { 3477 default: llvm_unreachable("Illegal FP comparison"); break; 3478 case ISD::SETUNE: 3479 case ISD::SETNE: Invert = true; // Fallthrough 3480 case ISD::SETOEQ: 3481 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3482 case ISD::SETOLT: 3483 case ISD::SETLT: Swap = true; // Fallthrough 3484 case ISD::SETOGT: 3485 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3486 case ISD::SETOLE: 3487 case ISD::SETLE: Swap = true; // Fallthrough 3488 case ISD::SETOGE: 3489 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3490 case ISD::SETUGE: Swap = true; // Fallthrough 3491 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3492 case ISD::SETUGT: Swap = true; // Fallthrough 3493 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3494 case ISD::SETUEQ: Invert = true; // Fallthrough 3495 case ISD::SETONE: 3496 // Expand this to (OLT | OGT). 
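      // There is no single NEON compare for "ordered and not equal", so build
      // (Op1 > Op0) | (Op0 > Op1) from two VCGT nodes; for SETUEQ the same
      // result is inverted via the Invert flag set above.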
3497 TmpOp0 = Op0; 3498 TmpOp1 = Op1; 3499 Opc = ISD::OR; 3500 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3501 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3502 break; 3503 case ISD::SETUO: Invert = true; // Fallthrough 3504 case ISD::SETO: 3505 // Expand this to (OLT | OGE). 3506 TmpOp0 = Op0; 3507 TmpOp1 = Op1; 3508 Opc = ISD::OR; 3509 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3510 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3511 break; 3512 } 3513 } else { 3514 // Integer comparisons. 3515 switch (SetCCOpcode) { 3516 default: llvm_unreachable("Illegal integer comparison"); break; 3517 case ISD::SETNE: Invert = true; 3518 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3519 case ISD::SETLT: Swap = true; 3520 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3521 case ISD::SETLE: Swap = true; 3522 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3523 case ISD::SETULT: Swap = true; 3524 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3525 case ISD::SETULE: Swap = true; 3526 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3527 } 3528 3529 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3530 if (Opc == ARMISD::VCEQ) { 3531 3532 SDValue AndOp; 3533 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3534 AndOp = Op0; 3535 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3536 AndOp = Op1; 3537 3538 // Ignore bitconvert. 3539 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3540 AndOp = AndOp.getOperand(0); 3541 3542 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3543 Opc = ARMISD::VTST; 3544 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3545 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3546 Invert = !Invert; 3547 } 3548 } 3549 } 3550 3551 if (Swap) 3552 std::swap(Op0, Op1); 3553 3554 // If one of the operands is a constant vector zero, attempt to fold the 3555 // comparison to a specialized compare-against-zero form. 3556 SDValue SingleOp; 3557 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3558 SingleOp = Op0; 3559 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3560 if (Opc == ARMISD::VCGE) 3561 Opc = ARMISD::VCLEZ; 3562 else if (Opc == ARMISD::VCGT) 3563 Opc = ARMISD::VCLTZ; 3564 SingleOp = Op1; 3565 } 3566 3567 SDValue Result; 3568 if (SingleOp.getNode()) { 3569 switch (Opc) { 3570 case ARMISD::VCEQ: 3571 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3572 case ARMISD::VCGE: 3573 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3574 case ARMISD::VCLEZ: 3575 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3576 case ARMISD::VCGT: 3577 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3578 case ARMISD::VCLTZ: 3579 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3580 default: 3581 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3582 } 3583 } else { 3584 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3585 } 3586 3587 if (Invert) 3588 Result = DAG.getNOT(dl, Result, VT); 3589 3590 return Result; 3591} 3592 3593/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3594/// valid vector constant for a NEON instruction with a "modified immediate" 3595/// operand (e.g., VMOV). If so, return the encoded value. 
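/// For example (illustrative), a v4i32 splat of 0x0000nn00 is encoded with
/// Cmode=001x and an 8-bit Imm of nn, while a 64-bit splat is encoded with one
/// immediate bit per all-zeros/all-ones byte; the Op/Cmode pair and Imm are
/// packed together by ARM_AM::createNEONModImm.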
3596static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3597 unsigned SplatBitSize, SelectionDAG &DAG, 3598 EVT &VT, bool is128Bits, NEONModImmType type) { 3599 unsigned OpCmode, Imm; 3600 3601 // SplatBitSize is set to the smallest size that splats the vector, so a 3602 // zero vector will always have SplatBitSize == 8. However, NEON modified 3603 // immediate instructions other than VMOV do not support the 8-bit encoding 3604 // of a zero vector, and the default encoding of zero is supposed to be the 3605 // 32-bit version. 3606 if (SplatBits == 0) 3607 SplatBitSize = 32; 3608 3609 switch (SplatBitSize) { 3610 case 8: 3611 if (type != VMOVModImm) 3612 return SDValue(); 3613 // Any 1-byte value is OK. Op=0, Cmode=1110. 3614 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3615 OpCmode = 0xe; 3616 Imm = SplatBits; 3617 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3618 break; 3619 3620 case 16: 3621 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3622 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3623 if ((SplatBits & ~0xff) == 0) { 3624 // Value = 0x00nn: Op=x, Cmode=100x. 3625 OpCmode = 0x8; 3626 Imm = SplatBits; 3627 break; 3628 } 3629 if ((SplatBits & ~0xff00) == 0) { 3630 // Value = 0xnn00: Op=x, Cmode=101x. 3631 OpCmode = 0xa; 3632 Imm = SplatBits >> 8; 3633 break; 3634 } 3635 return SDValue(); 3636 3637 case 32: 3638 // NEON's 32-bit VMOV supports splat values where: 3639 // * only one byte is nonzero, or 3640 // * the least significant byte is 0xff and the second byte is nonzero, or 3641 // * the least significant 2 bytes are 0xff and the third is nonzero. 3642 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3643 if ((SplatBits & ~0xff) == 0) { 3644 // Value = 0x000000nn: Op=x, Cmode=000x. 3645 OpCmode = 0; 3646 Imm = SplatBits; 3647 break; 3648 } 3649 if ((SplatBits & ~0xff00) == 0) { 3650 // Value = 0x0000nn00: Op=x, Cmode=001x. 3651 OpCmode = 0x2; 3652 Imm = SplatBits >> 8; 3653 break; 3654 } 3655 if ((SplatBits & ~0xff0000) == 0) { 3656 // Value = 0x00nn0000: Op=x, Cmode=010x. 3657 OpCmode = 0x4; 3658 Imm = SplatBits >> 16; 3659 break; 3660 } 3661 if ((SplatBits & ~0xff000000) == 0) { 3662 // Value = 0xnn000000: Op=x, Cmode=011x. 3663 OpCmode = 0x6; 3664 Imm = SplatBits >> 24; 3665 break; 3666 } 3667 3668 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3669 if (type == OtherModImm) return SDValue(); 3670 3671 if ((SplatBits & ~0xffff) == 0 && 3672 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3673 // Value = 0x0000nnff: Op=x, Cmode=1100. 3674 OpCmode = 0xc; 3675 Imm = SplatBits >> 8; 3676 SplatBits |= 0xff; 3677 break; 3678 } 3679 3680 if ((SplatBits & ~0xffffff) == 0 && 3681 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3682 // Value = 0x00nnffff: Op=x, Cmode=1101. 3683 OpCmode = 0xd; 3684 Imm = SplatBits >> 16; 3685 SplatBits |= 0xffff; 3686 break; 3687 } 3688 3689 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3690 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3691 // VMOV.I32. A (very) minor optimization would be to replicate the value 3692 // and fall through here to test for a valid 64-bit splat. But, then the 3693 // caller would also need to check and handle the change in size. 3694 return SDValue(); 3695 3696 case 64: { 3697 if (type != VMOVModImm) 3698 return SDValue(); 3699 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
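    // Each of the 8 immediate bits selects whether the corresponding byte of
    // the splat is 0xff or 0x00; e.g. (illustrative) the splat value
    // 0x00ff00ff00ff00ff would be encoded with Imm = 0b01010101.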
3700 uint64_t BitMask = 0xff; 3701 uint64_t Val = 0; 3702 unsigned ImmMask = 1; 3703 Imm = 0; 3704 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3705 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3706 Val |= BitMask; 3707 Imm |= ImmMask; 3708 } else if ((SplatBits & BitMask) != 0) { 3709 return SDValue(); 3710 } 3711 BitMask <<= 8; 3712 ImmMask <<= 1; 3713 } 3714 // Op=1, Cmode=1110. 3715 OpCmode = 0x1e; 3716 SplatBits = Val; 3717 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3718 break; 3719 } 3720 3721 default: 3722 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3723 return SDValue(); 3724 } 3725 3726 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3727 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3728} 3729 3730static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3731 bool &ReverseVEXT, unsigned &Imm) { 3732 unsigned NumElts = VT.getVectorNumElements(); 3733 ReverseVEXT = false; 3734 3735 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3736 if (M[0] < 0) 3737 return false; 3738 3739 Imm = M[0]; 3740 3741 // If this is a VEXT shuffle, the immediate value is the index of the first 3742 // element. The other shuffle indices must be the successive elements after 3743 // the first one. 3744 unsigned ExpectedElt = Imm; 3745 for (unsigned i = 1; i < NumElts; ++i) { 3746 // Increment the expected index. If it wraps around, it may still be 3747 // a VEXT but the source vectors must be swapped. 3748 ExpectedElt += 1; 3749 if (ExpectedElt == NumElts * 2) { 3750 ExpectedElt = 0; 3751 ReverseVEXT = true; 3752 } 3753 3754 if (M[i] < 0) continue; // ignore UNDEF indices 3755 if (ExpectedElt != static_cast<unsigned>(M[i])) 3756 return false; 3757 } 3758 3759 // Adjust the index value if the source operands will be swapped. 3760 if (ReverseVEXT) 3761 Imm -= NumElts; 3762 3763 return true; 3764} 3765 3766/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3767/// instruction with the specified blocksize. (The order of the elements 3768/// within each block of the vector is reversed.) 3769static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3770 unsigned BlockSize) { 3771 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3772 "Only possible block sizes for VREV are: 16, 32, 64"); 3773 3774 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3775 if (EltSz == 64) 3776 return false; 3777 3778 unsigned NumElts = VT.getVectorNumElements(); 3779 unsigned BlockElts = M[0] + 1; 3780 // If the first shuffle index is UNDEF, be optimistic. 3781 if (M[0] < 0) 3782 BlockElts = BlockSize / EltSz; 3783 3784 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3785 return false; 3786 3787 for (unsigned i = 0; i < NumElts; ++i) { 3788 if (M[i] < 0) continue; // ignore UNDEF indices 3789 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3790 return false; 3791 } 3792 3793 return true; 3794} 3795 3796static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3797 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3798 // range, then 0 is placed into the resulting vector. So pretty much any mask 3799 // of 8 elements can work here. 
3800 return VT == MVT::v8i8 && M.size() == 8; 3801} 3802 3803static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3804 unsigned &WhichResult) { 3805 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3806 if (EltSz == 64) 3807 return false; 3808 3809 unsigned NumElts = VT.getVectorNumElements(); 3810 WhichResult = (M[0] == 0 ? 0 : 1); 3811 for (unsigned i = 0; i < NumElts; i += 2) { 3812 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3813 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3814 return false; 3815 } 3816 return true; 3817} 3818 3819/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3820/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3821/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3822static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3823 unsigned &WhichResult) { 3824 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3825 if (EltSz == 64) 3826 return false; 3827 3828 unsigned NumElts = VT.getVectorNumElements(); 3829 WhichResult = (M[0] == 0 ? 0 : 1); 3830 for (unsigned i = 0; i < NumElts; i += 2) { 3831 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3832 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3833 return false; 3834 } 3835 return true; 3836} 3837 3838static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3839 unsigned &WhichResult) { 3840 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3841 if (EltSz == 64) 3842 return false; 3843 3844 unsigned NumElts = VT.getVectorNumElements(); 3845 WhichResult = (M[0] == 0 ? 0 : 1); 3846 for (unsigned i = 0; i != NumElts; ++i) { 3847 if (M[i] < 0) continue; // ignore UNDEF indices 3848 if ((unsigned) M[i] != 2 * i + WhichResult) 3849 return false; 3850 } 3851 3852 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3853 if (VT.is64BitVector() && EltSz == 32) 3854 return false; 3855 3856 return true; 3857} 3858 3859/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3860/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3861/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3862static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3863 unsigned &WhichResult) { 3864 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3865 if (EltSz == 64) 3866 return false; 3867 3868 unsigned Half = VT.getVectorNumElements() / 2; 3869 WhichResult = (M[0] == 0 ? 0 : 1); 3870 for (unsigned j = 0; j != 2; ++j) { 3871 unsigned Idx = WhichResult; 3872 for (unsigned i = 0; i != Half; ++i) { 3873 int MIdx = M[i + j * Half]; 3874 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3875 return false; 3876 Idx += 2; 3877 } 3878 } 3879 3880 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3881 if (VT.is64BitVector() && EltSz == 32) 3882 return false; 3883 3884 return true; 3885} 3886 3887static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3888 unsigned &WhichResult) { 3889 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3890 if (EltSz == 64) 3891 return false; 3892 3893 unsigned NumElts = VT.getVectorNumElements(); 3894 WhichResult = (M[0] == 0 ? 0 : 1); 3895 unsigned Idx = WhichResult * NumElts / 2; 3896 for (unsigned i = 0; i != NumElts; i += 2) { 3897 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3898 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3899 return false; 3900 Idx += 1; 3901 } 3902 3903 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
3904 if (VT.is64BitVector() && EltSz == 32) 3905 return false; 3906 3907 return true; 3908} 3909 3910/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3911/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3912/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3913static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3914 unsigned &WhichResult) { 3915 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3916 if (EltSz == 64) 3917 return false; 3918 3919 unsigned NumElts = VT.getVectorNumElements(); 3920 WhichResult = (M[0] == 0 ? 0 : 1); 3921 unsigned Idx = WhichResult * NumElts / 2; 3922 for (unsigned i = 0; i != NumElts; i += 2) { 3923 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3924 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3925 return false; 3926 Idx += 1; 3927 } 3928 3929 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3930 if (VT.is64BitVector() && EltSz == 32) 3931 return false; 3932 3933 return true; 3934} 3935 3936// If N is an integer constant that can be moved into a register in one 3937// instruction, return an SDValue of such a constant (will become a MOV 3938// instruction). Otherwise return null. 3939static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3940 const ARMSubtarget *ST, DebugLoc dl) { 3941 uint64_t Val; 3942 if (!isa<ConstantSDNode>(N)) 3943 return SDValue(); 3944 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3945 3946 if (ST->isThumb1Only()) { 3947 if (Val <= 255 || ~Val <= 255) 3948 return DAG.getConstant(Val, MVT::i32); 3949 } else { 3950 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3951 return DAG.getConstant(Val, MVT::i32); 3952 } 3953 return SDValue(); 3954} 3955 3956// If this is a case we can't handle, return null and let the default 3957// expansion code take care of it. 3958SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3959 const ARMSubtarget *ST) const { 3960 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3961 DebugLoc dl = Op.getDebugLoc(); 3962 EVT VT = Op.getValueType(); 3963 3964 APInt SplatBits, SplatUndef; 3965 unsigned SplatBitSize; 3966 bool HasAnyUndefs; 3967 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3968 if (SplatBitSize <= 64) { 3969 // Check if an immediate VMOV works. 3970 EVT VmovVT; 3971 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3972 SplatUndef.getZExtValue(), SplatBitSize, 3973 DAG, VmovVT, VT.is128BitVector(), 3974 VMOVModImm); 3975 if (Val.getNode()) { 3976 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3977 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3978 } 3979 3980 // Try an immediate VMVN. 3981 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3982 Val = isNEONModifiedImm(NegatedImm, 3983 SplatUndef.getZExtValue(), SplatBitSize, 3984 DAG, VmovVT, VT.is128BitVector(), 3985 VMVNModImm); 3986 if (Val.getNode()) { 3987 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3988 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3989 } 3990 3991 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 
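      // e.g. (illustrative) a v4f32 splat of 1.0f fits the 8-bit VFP/NEON
      // floating-point immediate form checked by ARM_AM::getFP32Imm and can be
      // emitted as a single VMOVFPIMM instead of a constant-pool load.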
3992 if (VT == MVT::v2f32 || VT == MVT::v4f32) { 3993 ConstantFPSDNode *C = cast<ConstantFPSDNode>(Op.getOperand(0)); 3994 int ImmVal = ARM_AM::getFP32Imm(C->getValueAPF()); 3995 if (ImmVal != -1) { 3996 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 3997 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 3998 } 3999 } 4000 } 4001 } 4002 4003 // Scan through the operands to see if only one value is used. 4004 unsigned NumElts = VT.getVectorNumElements(); 4005 bool isOnlyLowElement = true; 4006 bool usesOnlyOneValue = true; 4007 bool isConstant = true; 4008 SDValue Value; 4009 for (unsigned i = 0; i < NumElts; ++i) { 4010 SDValue V = Op.getOperand(i); 4011 if (V.getOpcode() == ISD::UNDEF) 4012 continue; 4013 if (i > 0) 4014 isOnlyLowElement = false; 4015 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4016 isConstant = false; 4017 4018 if (!Value.getNode()) 4019 Value = V; 4020 else if (V != Value) 4021 usesOnlyOneValue = false; 4022 } 4023 4024 if (!Value.getNode()) 4025 return DAG.getUNDEF(VT); 4026 4027 if (isOnlyLowElement) 4028 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 4029 4030 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4031 4032 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 4033 // i32 and try again. 4034 if (usesOnlyOneValue && EltSize <= 32) { 4035 if (!isConstant) 4036 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 4037 if (VT.getVectorElementType().isFloatingPoint()) { 4038 SmallVector<SDValue, 8> Ops; 4039 for (unsigned i = 0; i < NumElts; ++i) 4040 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4041 Op.getOperand(i))); 4042 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4043 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4044 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4045 if (Val.getNode()) 4046 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4047 } 4048 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4049 if (Val.getNode()) 4050 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4051 } 4052 4053 // If all elements are constants and the case above didn't get hit, fall back 4054 // to the default expansion, which will generate a load from the constant 4055 // pool. 4056 if (isConstant) 4057 return SDValue(); 4058 4059 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4060 if (NumElts >= 4) { 4061 SDValue shuffle = ReconstructShuffle(Op, DAG); 4062 if (shuffle != SDValue()) 4063 return shuffle; 4064 } 4065 4066 // Vectors with 32- or 64-bit elements can be built by directly assigning 4067 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4068 // will be legalized. 4069 if (EltSize >= 32) { 4070 // Do the expansion with floating-point types, since that is what the VFP 4071 // registers are defined to use, and since i64 is not legal. 4072 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4073 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4074 SmallVector<SDValue, 8> Ops; 4075 for (unsigned i = 0; i < NumElts; ++i) 4076 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4077 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4078 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4079 } 4080 4081 return SDValue(); 4082} 4083 4084// Gather data to see if the operation can be modelled as a 4085// shuffle in combination with VEXTs. 
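// For example (illustrative), a BUILD_VECTOR whose operands are
// EXTRACT_VECTOR_ELTs drawn from at most two source vectors can usually be
// rewritten as a single VECTOR_SHUFFLE of those sources, using VEXT (or an
// EXTRACT_SUBVECTOR) to carve a result-sized window out of a source that is
// wider than the result.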
4086SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4087 SelectionDAG &DAG) const { 4088 DebugLoc dl = Op.getDebugLoc(); 4089 EVT VT = Op.getValueType(); 4090 unsigned NumElts = VT.getVectorNumElements(); 4091 4092 SmallVector<SDValue, 2> SourceVecs; 4093 SmallVector<unsigned, 2> MinElts; 4094 SmallVector<unsigned, 2> MaxElts; 4095 4096 for (unsigned i = 0; i < NumElts; ++i) { 4097 SDValue V = Op.getOperand(i); 4098 if (V.getOpcode() == ISD::UNDEF) 4099 continue; 4100 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4101 // A shuffle can only come from building a vector from various 4102 // elements of other vectors. 4103 return SDValue(); 4104 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4105 VT.getVectorElementType()) { 4106 // This code doesn't know how to handle shuffles where the vector 4107 // element types do not match (this happens because type legalization 4108 // promotes the return type of EXTRACT_VECTOR_ELT). 4109 // FIXME: It might be appropriate to extend this code to handle 4110 // mismatched types. 4111 return SDValue(); 4112 } 4113 4114 // Record this extraction against the appropriate vector if possible... 4115 SDValue SourceVec = V.getOperand(0); 4116 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4117 bool FoundSource = false; 4118 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4119 if (SourceVecs[j] == SourceVec) { 4120 if (MinElts[j] > EltNo) 4121 MinElts[j] = EltNo; 4122 if (MaxElts[j] < EltNo) 4123 MaxElts[j] = EltNo; 4124 FoundSource = true; 4125 break; 4126 } 4127 } 4128 4129 // Or record a new source if not... 4130 if (!FoundSource) { 4131 SourceVecs.push_back(SourceVec); 4132 MinElts.push_back(EltNo); 4133 MaxElts.push_back(EltNo); 4134 } 4135 } 4136 4137 // Currently only do something sane when at most two source vectors 4138 // are involved. 4139 if (SourceVecs.size() > 2) 4140 return SDValue(); 4141 4142 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4143 int VEXTOffsets[2] = {0, 0}; 4144 4145 // This loop extracts the usage patterns of the source vectors 4146 // and prepares appropriate SDValues for a shuffle if possible. 4147 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4148 if (SourceVecs[i].getValueType() == VT) { 4149 // No VEXT necessary 4150 ShuffleSrcs[i] = SourceVecs[i]; 4151 VEXTOffsets[i] = 0; 4152 continue; 4153 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4154 // It probably isn't worth padding out a smaller vector just to 4155 // break it down again in a shuffle. 4156 return SDValue(); 4157 } 4158 4159 // Since only 64-bit and 128-bit vectors are legal on ARM and 4160 // we've eliminated the other cases...
4161 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4162 "unexpected vector sizes in ReconstructShuffle"); 4163 4164 if (MaxElts[i] - MinElts[i] >= NumElts) { 4165 // Span too large for a VEXT to cope 4166 return SDValue(); 4167 } 4168 4169 if (MinElts[i] >= NumElts) { 4170 // The extraction can just take the second half 4171 VEXTOffsets[i] = NumElts; 4172 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4173 SourceVecs[i], 4174 DAG.getIntPtrConstant(NumElts)); 4175 } else if (MaxElts[i] < NumElts) { 4176 // The extraction can just take the first half 4177 VEXTOffsets[i] = 0; 4178 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4179 SourceVecs[i], 4180 DAG.getIntPtrConstant(0)); 4181 } else { 4182 // An actual VEXT is needed 4183 VEXTOffsets[i] = MinElts[i]; 4184 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4185 SourceVecs[i], 4186 DAG.getIntPtrConstant(0)); 4187 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4188 SourceVecs[i], 4189 DAG.getIntPtrConstant(NumElts)); 4190 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4191 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4192 } 4193 } 4194 4195 SmallVector<int, 8> Mask; 4196 4197 for (unsigned i = 0; i < NumElts; ++i) { 4198 SDValue Entry = Op.getOperand(i); 4199 if (Entry.getOpcode() == ISD::UNDEF) { 4200 Mask.push_back(-1); 4201 continue; 4202 } 4203 4204 SDValue ExtractVec = Entry.getOperand(0); 4205 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4206 .getOperand(1))->getSExtValue(); 4207 if (ExtractVec == SourceVecs[0]) { 4208 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4209 } else { 4210 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4211 } 4212 } 4213 4214 // Final check before we try to produce nonsense... 4215 if (isShuffleMaskLegal(Mask, VT)) 4216 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4217 &Mask[0]); 4218 4219 return SDValue(); 4220} 4221 4222/// isShuffleMaskLegal - Targets can use this to indicate that they only 4223/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4224/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4225/// are assumed to be legal. 4226bool 4227ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4228 EVT VT) const { 4229 if (VT.getVectorNumElements() == 4 && 4230 (VT.is128BitVector() || VT.is64BitVector())) { 4231 unsigned PFIndexes[4]; 4232 for (unsigned i = 0; i != 4; ++i) { 4233 if (M[i] < 0) 4234 PFIndexes[i] = 8; 4235 else 4236 PFIndexes[i] = M[i]; 4237 } 4238 4239 // Compute the index in the perfect shuffle table. 
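    // The four mask entries act as base-9 digits (lanes 0-7, or 8 for undef),
    // so the index below is PFIndexes[0]*9^3 + PFIndexes[1]*9^2 + ...; the top
    // two bits of each table entry hold the cost of the expansion.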
4240 unsigned PFTableIndex = 4241 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4242 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4243 unsigned Cost = (PFEntry >> 30); 4244 4245 if (Cost <= 4) 4246 return true; 4247 } 4248 4249 bool ReverseVEXT; 4250 unsigned Imm, WhichResult; 4251 4252 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4253 return (EltSize >= 32 || 4254 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4255 isVREVMask(M, VT, 64) || 4256 isVREVMask(M, VT, 32) || 4257 isVREVMask(M, VT, 16) || 4258 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4259 isVTBLMask(M, VT) || 4260 isVTRNMask(M, VT, WhichResult) || 4261 isVUZPMask(M, VT, WhichResult) || 4262 isVZIPMask(M, VT, WhichResult) || 4263 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4264 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4265 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4266} 4267 4268/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4269/// the specified operations to build the shuffle. 4270static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4271 SDValue RHS, SelectionDAG &DAG, 4272 DebugLoc dl) { 4273 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4274 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4275 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4276 4277 enum { 4278 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4279 OP_VREV, 4280 OP_VDUP0, 4281 OP_VDUP1, 4282 OP_VDUP2, 4283 OP_VDUP3, 4284 OP_VEXT1, 4285 OP_VEXT2, 4286 OP_VEXT3, 4287 OP_VUZPL, // VUZP, left result 4288 OP_VUZPR, // VUZP, right result 4289 OP_VZIPL, // VZIP, left result 4290 OP_VZIPR, // VZIP, right result 4291 OP_VTRNL, // VTRN, left result 4292 OP_VTRNR // VTRN, right result 4293 }; 4294 4295 if (OpNum == OP_COPY) { 4296 if (LHSID == (1*9+2)*9+3) return LHS; 4297 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4298 return RHS; 4299 } 4300 4301 SDValue OpLHS, OpRHS; 4302 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4303 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4304 EVT VT = OpLHS.getValueType(); 4305 4306 switch (OpNum) { 4307 default: llvm_unreachable("Unknown shuffle opcode!"); 4308 case OP_VREV: 4309 // VREV divides the vector in half and swaps within the half. 
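    // e.g. (illustrative) for a <4 x i32> vector, VREV64 turns lanes
    // <0, 1, 2, 3> into <1, 0, 3, 2>.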
4310 if (VT.getVectorElementType() == MVT::i32 || 4311 VT.getVectorElementType() == MVT::f32) 4312 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4313 // vrev <4 x i16> -> VREV32 4314 if (VT.getVectorElementType() == MVT::i16) 4315 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4316 // vrev <4 x i8> -> VREV16 4317 assert(VT.getVectorElementType() == MVT::i8); 4318 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4319 case OP_VDUP0: 4320 case OP_VDUP1: 4321 case OP_VDUP2: 4322 case OP_VDUP3: 4323 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4324 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4325 case OP_VEXT1: 4326 case OP_VEXT2: 4327 case OP_VEXT3: 4328 return DAG.getNode(ARMISD::VEXT, dl, VT, 4329 OpLHS, OpRHS, 4330 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4331 case OP_VUZPL: 4332 case OP_VUZPR: 4333 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4334 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4335 case OP_VZIPL: 4336 case OP_VZIPR: 4337 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4338 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4339 case OP_VTRNL: 4340 case OP_VTRNR: 4341 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4342 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4343 } 4344} 4345 4346static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4347 SmallVectorImpl<int> &ShuffleMask, 4348 SelectionDAG &DAG) { 4349 // Check to see if we can use the VTBL instruction. 4350 SDValue V1 = Op.getOperand(0); 4351 SDValue V2 = Op.getOperand(1); 4352 DebugLoc DL = Op.getDebugLoc(); 4353 4354 SmallVector<SDValue, 8> VTBLMask; 4355 for (SmallVectorImpl<int>::iterator 4356 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4357 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4358 4359 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4360 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4361 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4362 &VTBLMask[0], 8)); 4363 4364 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4365 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4366 &VTBLMask[0], 8)); 4367} 4368 4369static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4370 SDValue V1 = Op.getOperand(0); 4371 SDValue V2 = Op.getOperand(1); 4372 DebugLoc dl = Op.getDebugLoc(); 4373 EVT VT = Op.getValueType(); 4374 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4375 SmallVector<int, 8> ShuffleMask; 4376 4377 // Convert shuffles that are directly supported on NEON to target-specific 4378 // DAG nodes, instead of keeping them as shuffles and matching them again 4379 // during code selection. This is more efficient and avoids the possibility 4380 // of inconsistencies between legalization and selection. 4381 // FIXME: floating-point vectors should be canonicalized to integer vectors 4382 // of the same type so that they get CSEd properly. 4383 SVN->getMask(ShuffleMask); 4384 4385 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4386 if (EltSize <= 32) { 4387 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4388 int Lane = SVN->getSplatIndex(); 4389 // If this is an undef splat, generate it via "just" vdup, if possible. 4390 if (Lane == -1) Lane = 0; 4391 4392 // Test if V1 is a SCALAR_TO_VECTOR.
4393 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4394 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4395 } 4396 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 4397 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 4398 // reaches it). 4399 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 4400 !isa<ConstantSDNode>(V1.getOperand(0))) { 4401 bool IsScalarToVector = true; 4402 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 4403 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 4404 IsScalarToVector = false; 4405 break; 4406 } 4407 if (IsScalarToVector) 4408 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4409 } 4410 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4411 DAG.getConstant(Lane, MVT::i32)); 4412 } 4413 4414 bool ReverseVEXT; 4415 unsigned Imm; 4416 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4417 if (ReverseVEXT) 4418 std::swap(V1, V2); 4419 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4420 DAG.getConstant(Imm, MVT::i32)); 4421 } 4422 4423 if (isVREVMask(ShuffleMask, VT, 64)) 4424 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4425 if (isVREVMask(ShuffleMask, VT, 32)) 4426 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4427 if (isVREVMask(ShuffleMask, VT, 16)) 4428 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4429 4430 // Check for Neon shuffles that modify both input vectors in place. 4431 // If both results are used, i.e., if there are two shuffles with the same 4432 // source operands and with masks corresponding to both results of one of 4433 // these operations, DAG memoization will ensure that a single node is 4434 // used for both shuffles. 4435 unsigned WhichResult; 4436 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4437 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4438 V1, V2).getValue(WhichResult); 4439 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4440 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4441 V1, V2).getValue(WhichResult); 4442 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4443 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4444 V1, V2).getValue(WhichResult); 4445 4446 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4447 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4448 V1, V1).getValue(WhichResult); 4449 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4450 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4451 V1, V1).getValue(WhichResult); 4452 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4453 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4454 V1, V1).getValue(WhichResult); 4455 } 4456 4457 // If the shuffle is not directly supported and it has 4 elements, use 4458 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4459 unsigned NumElts = VT.getVectorNumElements(); 4460 if (NumElts == 4) { 4461 unsigned PFIndexes[4]; 4462 for (unsigned i = 0; i != 4; ++i) { 4463 if (ShuffleMask[i] < 0) 4464 PFIndexes[i] = 8; 4465 else 4466 PFIndexes[i] = ShuffleMask[i]; 4467 } 4468 4469 // Compute the index in the perfect shuffle table. 4470 unsigned PFTableIndex = 4471 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4472 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4473 unsigned Cost = (PFEntry >> 30); 4474 4475 if (Cost <= 4) 4476 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4477 } 4478 4479 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 
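  // Each result element is extracted from whichever source the mask selects,
  // e.g. a v2i64 shuffle with mask <1,3> becomes a BUILD_VECTOR of element 1
  // of V1 and element 1 of V2 (done with f64 elements, as explained below).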
4480 if (EltSize >= 32) { 4481 // Do the expansion with floating-point types, since that is what the VFP 4482 // registers are defined to use, and since i64 is not legal. 4483 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4484 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4485 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4486 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4487 SmallVector<SDValue, 8> Ops; 4488 for (unsigned i = 0; i < NumElts; ++i) { 4489 if (ShuffleMask[i] < 0) 4490 Ops.push_back(DAG.getUNDEF(EltVT)); 4491 else 4492 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4493 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4494 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4495 MVT::i32))); 4496 } 4497 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4498 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4499 } 4500 4501 if (VT == MVT::v8i8) { 4502 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4503 if (NewOp.getNode()) 4504 return NewOp; 4505 } 4506 4507 return SDValue(); 4508} 4509 4510static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4511 // INSERT_VECTOR_ELT is legal only for immediate indexes. 4512 SDValue Lane = Op.getOperand(2); 4513 if (!isa<ConstantSDNode>(Lane)) 4514 return SDValue(); 4515 4516 return Op; 4517} 4518 4519static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4520 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4521 SDValue Lane = Op.getOperand(1); 4522 if (!isa<ConstantSDNode>(Lane)) 4523 return SDValue(); 4524 4525 SDValue Vec = Op.getOperand(0); 4526 if (Op.getValueType() == MVT::i32 && 4527 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4528 DebugLoc dl = Op.getDebugLoc(); 4529 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4530 } 4531 4532 return Op; 4533} 4534 4535static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4536 // The only time a CONCAT_VECTORS operation can have legal types is when 4537 // two 64-bit vectors are concatenated to a 128-bit vector. 4538 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4539 "unexpected CONCAT_VECTORS"); 4540 DebugLoc dl = Op.getDebugLoc(); 4541 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4542 SDValue Op0 = Op.getOperand(0); 4543 SDValue Op1 = Op.getOperand(1); 4544 if (Op0.getOpcode() != ISD::UNDEF) 4545 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4546 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4547 DAG.getIntPtrConstant(0)); 4548 if (Op1.getOpcode() != ISD::UNDEF) 4549 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4550 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4551 DAG.getIntPtrConstant(1)); 4552 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4553} 4554 4555/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4556/// element has been zero/sign-extended, depending on the isSigned parameter, 4557/// from an integer type half its size. 4558static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4559 bool isSigned) { 4560 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4561 EVT VT = N->getValueType(0); 4562 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4563 SDNode *BVN = N->getOperand(0).getNode(); 4564 if (BVN->getValueType(0) != MVT::v4i32 || 4565 BVN->getOpcode() != ISD::BUILD_VECTOR) 4566 return false; 4567 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4568 unsigned HiElt = 1 - LoElt; 4569 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4570 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4571 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4572 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4573 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4574 return false; 4575 if (isSigned) { 4576 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4577 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4578 return true; 4579 } else { 4580 if (Hi0->isNullValue() && Hi1->isNullValue()) 4581 return true; 4582 } 4583 return false; 4584 } 4585 4586 if (N->getOpcode() != ISD::BUILD_VECTOR) 4587 return false; 4588 4589 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4590 SDNode *Elt = N->getOperand(i).getNode(); 4591 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4592 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4593 unsigned HalfSize = EltSize / 2; 4594 if (isSigned) { 4595 if (!isIntN(HalfSize, C->getSExtValue())) 4596 return false; 4597 } else { 4598 if (!isUIntN(HalfSize, C->getZExtValue())) 4599 return false; 4600 } 4601 continue; 4602 } 4603 return false; 4604 } 4605 4606 return true; 4607} 4608 4609/// isSignExtended - Check if a node is a vector value that is sign-extended 4610/// or a constant BUILD_VECTOR with sign-extended elements. 4611static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4612 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4613 return true; 4614 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4615 return true; 4616 return false; 4617} 4618 4619/// isZeroExtended - Check if a node is a vector value that is zero-extended 4620/// or a constant BUILD_VECTOR with zero-extended elements. 4621static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4622 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4623 return true; 4624 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4625 return true; 4626 return false; 4627} 4628 4629/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4630/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4631static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4632 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4633 return N->getOperand(0); 4634 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4635 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4636 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4637 LD->isNonTemporal(), LD->isInvariant(), 4638 LD->getAlignment()); 4639 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4640 // have been legalized as a BITCAST from v4i32. 4641 if (N->getOpcode() == ISD::BITCAST) { 4642 SDNode *BVN = N->getOperand(0).getNode(); 4643 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4644 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4645 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4646 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4647 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4648 } 4649 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
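  // e.g. a v4i32 constant vector whose elements all fit in 16 bits is rebuilt
  // here as the v4i16 constant vector that feeds the VMULL.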
4650 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4651 EVT VT = N->getValueType(0); 4652 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4653 unsigned NumElts = VT.getVectorNumElements(); 4654 MVT TruncVT = MVT::getIntegerVT(EltSize); 4655 SmallVector<SDValue, 8> Ops; 4656 for (unsigned i = 0; i != NumElts; ++i) { 4657 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4658 const APInt &CInt = C->getAPIntValue(); 4659 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4660 } 4661 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4662 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4663} 4664 4665static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4666 unsigned Opcode = N->getOpcode(); 4667 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4668 SDNode *N0 = N->getOperand(0).getNode(); 4669 SDNode *N1 = N->getOperand(1).getNode(); 4670 return N0->hasOneUse() && N1->hasOneUse() && 4671 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4672 } 4673 return false; 4674} 4675 4676static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4677 unsigned Opcode = N->getOpcode(); 4678 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4679 SDNode *N0 = N->getOperand(0).getNode(); 4680 SDNode *N1 = N->getOperand(1).getNode(); 4681 return N0->hasOneUse() && N1->hasOneUse() && 4682 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4683 } 4684 return false; 4685} 4686 4687static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4688 // Multiplications are only custom-lowered for 128-bit vectors so that 4689 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4690 EVT VT = Op.getValueType(); 4691 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4692 SDNode *N0 = Op.getOperand(0).getNode(); 4693 SDNode *N1 = Op.getOperand(1).getNode(); 4694 unsigned NewOpc = 0; 4695 bool isMLA = false; 4696 bool isN0SExt = isSignExtended(N0, DAG); 4697 bool isN1SExt = isSignExtended(N1, DAG); 4698 if (isN0SExt && isN1SExt) 4699 NewOpc = ARMISD::VMULLs; 4700 else { 4701 bool isN0ZExt = isZeroExtended(N0, DAG); 4702 bool isN1ZExt = isZeroExtended(N1, DAG); 4703 if (isN0ZExt && isN1ZExt) 4704 NewOpc = ARMISD::VMULLu; 4705 else if (isN1SExt || isN1ZExt) { 4706 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4707 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4708 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4709 NewOpc = ARMISD::VMULLs; 4710 isMLA = true; 4711 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4712 NewOpc = ARMISD::VMULLu; 4713 isMLA = true; 4714 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4715 std::swap(N0, N1); 4716 NewOpc = ARMISD::VMULLu; 4717 isMLA = true; 4718 } 4719 } 4720 4721 if (!NewOpc) { 4722 if (VT == MVT::v2i64) 4723 // Fall through to expand this. It is not legal. 4724 return SDValue(); 4725 else 4726 // Other vector multiplications are legal. 4727 return Op; 4728 } 4729 } 4730 4731 // Legalize to a VMULL instruction. 4732 DebugLoc DL = Op.getDebugLoc(); 4733 SDValue Op0; 4734 SDValue Op1 = SkipExtension(N1, DAG); 4735 if (!isMLA) { 4736 Op0 = SkipExtension(N0, DAG); 4737 assert(Op0.getValueType().is64BitVector() && 4738 Op1.getValueType().is64BitVector() && 4739 "unexpected types for extended operands to VMULL"); 4740 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4741 } 4742 4743 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4744 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4745 // vmull q0, d4, d6 4746 // vmlal q0, d5, d6 4747 // is faster than 4748 // vaddl q0, d4, d5 4749 // vmovl q1, d6 4750 // vmul q0, q0, q1 4751 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4752 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4753 EVT Op1VT = Op1.getValueType(); 4754 return DAG.getNode(N0->getOpcode(), DL, VT, 4755 DAG.getNode(NewOpc, DL, VT, 4756 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4757 DAG.getNode(NewOpc, DL, VT, 4758 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4759} 4760 4761static SDValue 4762LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4763 // Convert to float 4764 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4765 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4766 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4767 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4768 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4769 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4770 // Get reciprocal estimate. 4771 // float4 recip = vrecpeq_f32(yf); 4772 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4773 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4774 // Because char has a smaller range than uchar, we can actually get away 4775 // without any newton steps. This requires that we use a weird bias 4776 // of 0xb000, however (again, this has been exhaustively tested). 4777 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4778 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4779 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4780 Y = DAG.getConstant(0xb000, MVT::i32); 4781 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4782 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4783 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4784 // Convert back to short. 4785 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4786 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4787 return X; 4788} 4789 4790static SDValue 4791LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4792 SDValue N2; 4793 // Convert to float. 4794 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4795 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4796 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4797 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4798 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4799 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4800 4801 // Use reciprocal estimate and one refinement step. 4802 // float4 recip = vrecpeq_f32(yf); 4803 // recip *= vrecpsq_f32(yf, recip); 4804 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4805 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4806 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4807 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4808 N1, N2); 4809 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4810 // Because short has a smaller range than ushort, we can actually get away 4811 // with only a single newton step. This requires that we use a weird bias 4812 // of 89, however (again, this has been exhaustively tested). 
4813 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4814 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4815 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4816 N1 = DAG.getConstant(0x89, MVT::i32); 4817 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4818 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4819 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4820 // Convert back to integer and return. 4821 // return vmovn_s32(vcvt_s32_f32(result)); 4822 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4823 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4824 return N0; 4825} 4826 4827static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4828 EVT VT = Op.getValueType(); 4829 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4830 "unexpected type for custom-lowering ISD::SDIV"); 4831 4832 DebugLoc dl = Op.getDebugLoc(); 4833 SDValue N0 = Op.getOperand(0); 4834 SDValue N1 = Op.getOperand(1); 4835 SDValue N2, N3; 4836 4837 if (VT == MVT::v8i8) { 4838 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4839 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4840 4841 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4842 DAG.getIntPtrConstant(4)); 4843 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4844 DAG.getIntPtrConstant(4)); 4845 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4846 DAG.getIntPtrConstant(0)); 4847 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4848 DAG.getIntPtrConstant(0)); 4849 4850 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4851 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4852 4853 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4854 N0 = LowerCONCAT_VECTORS(N0, DAG); 4855 4856 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4857 return N0; 4858 } 4859 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4860} 4861 4862static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4863 EVT VT = Op.getValueType(); 4864 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4865 "unexpected type for custom-lowering ISD::UDIV"); 4866 4867 DebugLoc dl = Op.getDebugLoc(); 4868 SDValue N0 = Op.getOperand(0); 4869 SDValue N1 = Op.getOperand(1); 4870 SDValue N2, N3; 4871 4872 if (VT == MVT::v8i8) { 4873 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4874 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4875 4876 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4877 DAG.getIntPtrConstant(4)); 4878 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4879 DAG.getIntPtrConstant(4)); 4880 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4881 DAG.getIntPtrConstant(0)); 4882 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4883 DAG.getIntPtrConstant(0)); 4884 4885 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4886 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4887 4888 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4889 N0 = LowerCONCAT_VECTORS(N0, DAG); 4890 4891 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4892 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4893 N0); 4894 return N0; 4895 } 4896 4897 // v4i16 sdiv ... Convert to float. 
4898   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4899   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4900   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4901   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4902   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4903   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4904 
4905   // Use reciprocal estimate and two refinement steps.
4906   // float4 recip = vrecpeq_f32(yf);
4907   // recip *= vrecpsq_f32(yf, recip);
4908   // recip *= vrecpsq_f32(yf, recip);
4909   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4910                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
4911   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4912                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4913                    BN1, N2);
4914   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4915   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4916                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4917                    BN1, N2);
4918   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4919   // Simply multiplying by the reciprocal estimate can leave us a few ulps
4920   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4921   // and that it will never cause us to return an answer too large).
4922   // float4 result = as_float4(as_int4(xf*recip) + 2);
4923   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4924   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4925   N1 = DAG.getConstant(2, MVT::i32);
4926   N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4927   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4928   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4929   // Convert back to integer and return.
4930   // return vmovn_u32(vcvt_s32_f32(result));
4931   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4932   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4933   return N0;
4934 }
4935 
4936 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
4937   EVT VT = Op.getNode()->getValueType(0);
4938   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4939 
4940   unsigned Opc;
4941   bool ExtraOp = false;
4942   switch (Op.getOpcode()) {
4943   default: assert(0 && "Invalid code");
4944   case ISD::ADDC: Opc = ARMISD::ADDC; break;
4945   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
4946   case ISD::SUBC: Opc = ARMISD::SUBC; break;
4947   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
4948   }
4949 
4950   if (!ExtraOp)
4951     return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4952                        Op.getOperand(1));
4953   return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4954                      Op.getOperand(1), Op.getOperand(2));
4955 }
4956 
4957 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
4958   // Monotonic load/store is legal for all targets
4959   if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
4960     return Op;
4961 
4962   // Acquire/Release load/store is not legal for targets without a
4963   // dmb or equivalent available.
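  // Returning a null SDValue here lets the generic legalizer expand the
  // operation instead.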
4964   return SDValue();
4965 }
4966 
4967 
4968 static void
4969 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
4970                     SelectionDAG &DAG, unsigned NewOp) {
4971   DebugLoc dl = Node->getDebugLoc();
4972   assert(Node->getValueType(0) == MVT::i64 &&
4973          "Only know how to expand i64 atomics");
4974 
4975   SmallVector<SDValue, 6> Ops;
4976   Ops.push_back(Node->getOperand(0)); // Chain
4977   Ops.push_back(Node->getOperand(1)); // Ptr
4978   // Low part of Val1
4979   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4980                             Node->getOperand(2), DAG.getIntPtrConstant(0)));
4981   // High part of Val1
4982   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4983                             Node->getOperand(2), DAG.getIntPtrConstant(1)));
4984   if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
4985     // Low part of Val2
4986     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4987                               Node->getOperand(3), DAG.getIntPtrConstant(0)));
4988     // High part of Val2
4989     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4990                               Node->getOperand(3), DAG.getIntPtrConstant(1)));
4991   }
4992   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4993   SDValue Result =
4994     DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
4995                             cast<MemSDNode>(Node)->getMemOperand());
4996   SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
4997   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
4998   Results.push_back(Result.getValue(2));
4999 }
5000 
5001 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
5002   switch (Op.getOpcode()) {
5003   default: llvm_unreachable("Don't know how to custom lower this!");
5004   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
5005   case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
5006   case ISD::GlobalAddress:
5007     return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 5008 LowerGlobalAddressELF(Op, DAG); 5009 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5010 case ISD::SELECT: return LowerSELECT(Op, DAG); 5011 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5012 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 5013 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 5014 case ISD::VASTART: return LowerVASTART(Op, DAG); 5015 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 5016 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 5017 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 5018 case ISD::SINT_TO_FP: 5019 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5020 case ISD::FP_TO_SINT: 5021 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 5022 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5023 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5024 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5025 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 5026 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 5027 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 5028 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 5029 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 5030 Subtarget); 5031 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 5032 case ISD::SHL: 5033 case ISD::SRL: 5034 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 5035 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 5036 case ISD::SRL_PARTS: 5037 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 5038 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 5039 case ISD::SETCC: return LowerVSETCC(Op, DAG); 5040 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 5041 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5042 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5043 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5044 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 5045 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5046 case ISD::MUL: return LowerMUL(Op, DAG); 5047 case ISD::SDIV: return LowerSDIV(Op, DAG); 5048 case ISD::UDIV: return LowerUDIV(Op, DAG); 5049 case ISD::ADDC: 5050 case ISD::ADDE: 5051 case ISD::SUBC: 5052 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 5053 case ISD::ATOMIC_LOAD: 5054 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5055 } 5056 return SDValue(); 5057} 5058 5059/// ReplaceNodeResults - Replace the results of node with an illegal result 5060/// type with new values built out of custom code. 
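/// For example, the 64-bit atomic operations handled below are expanded here
/// into ARMISD::ATOM*64_DAG nodes, which are later emitted as LDREXD/STREXD
/// loops.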
5061void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5062 SmallVectorImpl<SDValue>&Results, 5063 SelectionDAG &DAG) const { 5064 SDValue Res; 5065 switch (N->getOpcode()) { 5066 default: 5067 llvm_unreachable("Don't know how to custom expand this!"); 5068 break; 5069 case ISD::BITCAST: 5070 Res = ExpandBITCAST(N, DAG); 5071 break; 5072 case ISD::SRL: 5073 case ISD::SRA: 5074 Res = Expand64BitShift(N, DAG, Subtarget); 5075 break; 5076 case ISD::ATOMIC_LOAD_ADD: 5077 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5078 return; 5079 case ISD::ATOMIC_LOAD_AND: 5080 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5081 return; 5082 case ISD::ATOMIC_LOAD_NAND: 5083 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5084 return; 5085 case ISD::ATOMIC_LOAD_OR: 5086 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5087 return; 5088 case ISD::ATOMIC_LOAD_SUB: 5089 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5090 return; 5091 case ISD::ATOMIC_LOAD_XOR: 5092 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5093 return; 5094 case ISD::ATOMIC_SWAP: 5095 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5096 return; 5097 case ISD::ATOMIC_CMP_SWAP: 5098 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5099 return; 5100 } 5101 if (Res.getNode()) 5102 Results.push_back(Res); 5103} 5104 5105//===----------------------------------------------------------------------===// 5106// ARM Scheduler Hooks 5107//===----------------------------------------------------------------------===// 5108 5109MachineBasicBlock * 5110ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5111 MachineBasicBlock *BB, 5112 unsigned Size) const { 5113 unsigned dest = MI->getOperand(0).getReg(); 5114 unsigned ptr = MI->getOperand(1).getReg(); 5115 unsigned oldval = MI->getOperand(2).getReg(); 5116 unsigned newval = MI->getOperand(3).getReg(); 5117 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5118 DebugLoc dl = MI->getDebugLoc(); 5119 bool isThumb2 = Subtarget->isThumb2(); 5120 5121 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5122 unsigned scratch = 5123 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 5124 : ARM::GPRRegisterClass); 5125 5126 if (isThumb2) { 5127 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5128 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 5129 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 5130 } 5131 5132 unsigned ldrOpc, strOpc; 5133 switch (Size) { 5134 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5135 case 1: 5136 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5137 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5138 break; 5139 case 2: 5140 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5141 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5142 break; 5143 case 4: 5144 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5145 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5146 break; 5147 } 5148 5149 MachineFunction *MF = BB->getParent(); 5150 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5151 MachineFunction::iterator It = BB; 5152 ++It; // insert the new blocks after the current block 5153 5154 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5155 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5156 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5157 MF->insert(It, loop1MBB); 5158 MF->insert(It, loop2MBB); 5159 MF->insert(It, exitMBB); 5160 5161 // Transfer the remainder of BB and its successor edges to exitMBB. 5162 exitMBB->splice(exitMBB->begin(), BB, 5163 llvm::next(MachineBasicBlock::iterator(MI)), 5164 BB->end()); 5165 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5166 5167 // thisMBB: 5168 // ... 5169 // fallthrough --> loop1MBB 5170 BB->addSuccessor(loop1MBB); 5171 5172 // loop1MBB: 5173 // ldrex dest, [ptr] 5174 // cmp dest, oldval 5175 // bne exitMBB 5176 BB = loop1MBB; 5177 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5178 if (ldrOpc == ARM::t2LDREX) 5179 MIB.addImm(0); 5180 AddDefaultPred(MIB); 5181 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5182 .addReg(dest).addReg(oldval)); 5183 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5184 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5185 BB->addSuccessor(loop2MBB); 5186 BB->addSuccessor(exitMBB); 5187 5188 // loop2MBB: 5189 // strex scratch, newval, [ptr] 5190 // cmp scratch, #0 5191 // bne loop1MBB 5192 BB = loop2MBB; 5193 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5194 if (strOpc == ARM::t2STREX) 5195 MIB.addImm(0); 5196 AddDefaultPred(MIB); 5197 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5198 .addReg(scratch).addImm(0)); 5199 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5200 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5201 BB->addSuccessor(loop1MBB); 5202 BB->addSuccessor(exitMBB); 5203 5204 // exitMBB: 5205 // ... 5206 BB = exitMBB; 5207 5208 MI->eraseFromParent(); // The instruction is gone now. 5209 5210 return BB; 5211} 5212 5213MachineBasicBlock * 5214ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5215 unsigned Size, unsigned BinOpcode) const { 5216 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5217 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5218 5219 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5220 MachineFunction *MF = BB->getParent(); 5221 MachineFunction::iterator It = BB; 5222 ++It; 5223 5224 unsigned dest = MI->getOperand(0).getReg(); 5225 unsigned ptr = MI->getOperand(1).getReg(); 5226 unsigned incr = MI->getOperand(2).getReg(); 5227 DebugLoc dl = MI->getDebugLoc(); 5228 bool isThumb2 = Subtarget->isThumb2(); 5229 5230 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5231 if (isThumb2) { 5232 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5233 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5234 } 5235 5236 unsigned ldrOpc, strOpc; 5237 switch (Size) { 5238 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5239 case 1: 5240 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5241 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5242 break; 5243 case 2: 5244 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5245 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5246 break; 5247 case 4: 5248 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5249 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5250 break; 5251 } 5252 5253 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5254 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5255 MF->insert(It, loopMBB); 5256 MF->insert(It, exitMBB); 5257 5258 // Transfer the remainder of BB and its successor edges to exitMBB. 5259 exitMBB->splice(exitMBB->begin(), BB, 5260 llvm::next(MachineBasicBlock::iterator(MI)), 5261 BB->end()); 5262 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5263 5264 TargetRegisterClass *TRC = 5265 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5266 unsigned scratch = MRI.createVirtualRegister(TRC); 5267 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5268 5269 // thisMBB: 5270 // ... 5271 // fallthrough --> loopMBB 5272 BB->addSuccessor(loopMBB); 5273 5274 // loopMBB: 5275 // ldrex dest, ptr 5276 // <binop> scratch2, dest, incr 5277 // strex scratch, scratch2, ptr 5278 // cmp scratch, #0 5279 // bne- loopMBB 5280 // fallthrough --> exitMBB 5281 BB = loopMBB; 5282 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5283 if (ldrOpc == ARM::t2LDREX) 5284 MIB.addImm(0); 5285 AddDefaultPred(MIB); 5286 if (BinOpcode) { 5287 // operand order needs to go the other way for NAND 5288 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5289 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5290 addReg(incr).addReg(dest)).addReg(0); 5291 else 5292 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5293 addReg(dest).addReg(incr)).addReg(0); 5294 } 5295 5296 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5297 if (strOpc == ARM::t2STREX) 5298 MIB.addImm(0); 5299 AddDefaultPred(MIB); 5300 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5301 .addReg(scratch).addImm(0)); 5302 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5303 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5304 5305 BB->addSuccessor(loopMBB); 5306 BB->addSuccessor(exitMBB); 5307 5308 // exitMBB: 5309 // ... 5310 BB = exitMBB; 5311 5312 MI->eraseFromParent(); // The instruction is gone now. 5313 5314 return BB; 5315} 5316 5317MachineBasicBlock * 5318ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5319 MachineBasicBlock *BB, 5320 unsigned Size, 5321 bool signExtend, 5322 ARMCC::CondCodes Cond) const { 5323 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5324 5325 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5326 MachineFunction *MF = BB->getParent(); 5327 MachineFunction::iterator It = BB; 5328 ++It; 5329 5330 unsigned dest = MI->getOperand(0).getReg(); 5331 unsigned ptr = MI->getOperand(1).getReg(); 5332 unsigned incr = MI->getOperand(2).getReg(); 5333 unsigned oldval = dest; 5334 DebugLoc dl = MI->getDebugLoc(); 5335 bool isThumb2 = Subtarget->isThumb2(); 5336 5337 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5338 if (isThumb2) { 5339 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5340 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5341 } 5342 5343 unsigned ldrOpc, strOpc, extendOpc; 5344 switch (Size) { 5345 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5346 case 1: 5347 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5348 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5349 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 5350 break; 5351 case 2: 5352 ldrOpc = isThumb2 ? 
ARM::t2LDREXH : ARM::LDREXH; 5353 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5354 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5355 break; 5356 case 4: 5357 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5358 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5359 extendOpc = 0; 5360 break; 5361 } 5362 5363 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5364 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5365 MF->insert(It, loopMBB); 5366 MF->insert(It, exitMBB); 5367 5368 // Transfer the remainder of BB and its successor edges to exitMBB. 5369 exitMBB->splice(exitMBB->begin(), BB, 5370 llvm::next(MachineBasicBlock::iterator(MI)), 5371 BB->end()); 5372 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5373 5374 TargetRegisterClass *TRC = 5375 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5376 unsigned scratch = MRI.createVirtualRegister(TRC); 5377 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5378 5379 // thisMBB: 5380 // ... 5381 // fallthrough --> loopMBB 5382 BB->addSuccessor(loopMBB); 5383 5384 // loopMBB: 5385 // ldrex dest, ptr 5386 // (sign extend dest, if required) 5387 // cmp dest, incr 5388 // cmov.cond scratch2, dest, incr 5389 // strex scratch, scratch2, ptr 5390 // cmp scratch, #0 5391 // bne- loopMBB 5392 // fallthrough --> exitMBB 5393 BB = loopMBB; 5394 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5395 if (ldrOpc == ARM::t2LDREX) 5396 MIB.addImm(0); 5397 AddDefaultPred(MIB); 5398 5399 // Sign extend the value, if necessary. 5400 if (signExtend && extendOpc) { 5401 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5402 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5403 .addReg(dest) 5404 .addImm(0)); 5405 } 5406 5407 // Build compare and cmov instructions. 5408 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5409 .addReg(oldval).addReg(incr)); 5410 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5411 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5412 5413 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5414 if (strOpc == ARM::t2STREX) 5415 MIB.addImm(0); 5416 AddDefaultPred(MIB); 5417 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5418 .addReg(scratch).addImm(0)); 5419 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5420 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5421 5422 BB->addSuccessor(loopMBB); 5423 BB->addSuccessor(exitMBB); 5424 5425 // exitMBB: 5426 // ... 5427 BB = exitMBB; 5428 5429 MI->eraseFromParent(); // The instruction is gone now. 5430 5431 return BB; 5432} 5433 5434MachineBasicBlock * 5435ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5436 unsigned Op1, unsigned Op2, 5437 bool NeedsCarry, bool IsCmpxchg) const { 5438 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
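  // For ATOMIC_CMP_SWAP (IsCmpxchg), two cmp+bne early exits are inserted
  // after the LDREXD so the STREXD is only attempted when both halves of the
  // loaded value match the expected value.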
5439 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5440 5441 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5442 MachineFunction *MF = BB->getParent(); 5443 MachineFunction::iterator It = BB; 5444 ++It; 5445 5446 unsigned destlo = MI->getOperand(0).getReg(); 5447 unsigned desthi = MI->getOperand(1).getReg(); 5448 unsigned ptr = MI->getOperand(2).getReg(); 5449 unsigned vallo = MI->getOperand(3).getReg(); 5450 unsigned valhi = MI->getOperand(4).getReg(); 5451 DebugLoc dl = MI->getDebugLoc(); 5452 bool isThumb2 = Subtarget->isThumb2(); 5453 5454 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5455 if (isThumb2) { 5456 MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); 5457 MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); 5458 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5459 } 5460 5461 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5462 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5463 5464 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5465 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5466 if (IsCmpxchg) { 5467 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5468 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5469 } 5470 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5471 MF->insert(It, loopMBB); 5472 if (IsCmpxchg) { 5473 MF->insert(It, contBB); 5474 MF->insert(It, cont2BB); 5475 } 5476 MF->insert(It, exitMBB); 5477 5478 // Transfer the remainder of BB and its successor edges to exitMBB. 5479 exitMBB->splice(exitMBB->begin(), BB, 5480 llvm::next(MachineBasicBlock::iterator(MI)), 5481 BB->end()); 5482 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5483 5484 TargetRegisterClass *TRC = 5485 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5486 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5487 5488 // thisMBB: 5489 // ... 5490 // fallthrough --> loopMBB 5491 BB->addSuccessor(loopMBB); 5492 5493 // loopMBB: 5494 // ldrexd r2, r3, ptr 5495 // <binopa> r0, r2, incr 5496 // <binopb> r1, r3, incr 5497 // strexd storesuccess, r0, r1, ptr 5498 // cmp storesuccess, #0 5499 // bne- loopMBB 5500 // fallthrough --> exitMBB 5501 // 5502 // Note that the registers are explicitly specified because there is not any 5503 // way to force the register allocator to allocate a register pair. 5504 // 5505 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5506 // need to properly enforce the restriction that the two output registers 5507 // for ldrexd must be different. 5508 BB = loopMBB; 5509 // Load 5510 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5511 .addReg(ARM::R2, RegState::Define) 5512 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5513 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5514 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5515 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5516 5517 if (IsCmpxchg) { 5518 // Add early exit 5519 for (unsigned i = 0; i < 2; i++) { 5520 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5521 ARM::CMPrr)) 5522 .addReg(i == 0 ? destlo : desthi) 5523 .addReg(i == 0 ? vallo : valhi)); 5524 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5525 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5526 BB->addSuccessor(exitMBB); 5527 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5528 BB = (i == 0 ? 
contBB : cont2BB); 5529 } 5530 5531 // Copy to physregs for strexd 5532 unsigned setlo = MI->getOperand(5).getReg(); 5533 unsigned sethi = MI->getOperand(6).getReg(); 5534 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5535 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5536 } else if (Op1) { 5537 // Perform binary operation 5538 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5539 .addReg(destlo).addReg(vallo)) 5540 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5541 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5542 .addReg(desthi).addReg(valhi)).addReg(0); 5543 } else { 5544 // Copy to physregs for strexd 5545 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5546 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5547 } 5548 5549 // Store 5550 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5551 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5552 // Cmp+jump 5553 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5554 .addReg(storesuccess).addImm(0)); 5555 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5556 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5557 5558 BB->addSuccessor(loopMBB); 5559 BB->addSuccessor(exitMBB); 5560 5561 // exitMBB: 5562 // ... 5563 BB = exitMBB; 5564 5565 MI->eraseFromParent(); // The instruction is gone now. 5566 5567 return BB; 5568} 5569 5570/// EmitBasePointerRecalculation - For functions using a base pointer, we 5571/// rematerialize it (via the frame pointer). 5572void ARMTargetLowering:: 5573EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB, 5574 MachineBasicBlock *DispatchBB) const { 5575 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5576 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 5577 MachineFunction &MF = *MI->getParent()->getParent(); 5578 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 5579 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 5580 5581 if (!RI.hasBasePointer(MF)) return; 5582 5583 MachineBasicBlock::iterator MBBI = MI; 5584 5585 int32_t NumBytes = AFI->getFramePtrSpillOffset(); 5586 unsigned FramePtr = RI.getFrameRegister(MF); 5587 assert(MF.getTarget().getFrameLowering()->hasFP(MF) && 5588 "Base pointer without frame pointer?"); 5589 5590 if (AFI->isThumb2Function()) 5591 llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5592 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5593 else if (AFI->isThumbFunction()) 5594 llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5595 FramePtr, -NumBytes, *AII, RI); 5596 else 5597 llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5598 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5599 5600 if (!RI.needsStackRealignment(MF)) return; 5601 5602 // If there's dynamic realignment, adjust for it. 5603 MachineFrameInfo *MFI = MF.getFrameInfo(); 5604 unsigned MaxAlign = MFI->getMaxAlignment(); 5605 assert(!AFI->isThumb1OnlyFunction()); 5606 5607 // Emit bic r6, r6, MaxAlign 5608 unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri; 5609 AddDefaultCC( 5610 AddDefaultPred( 5611 BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6) 5612 .addReg(ARM::R6, RegState::Kill) 5613 .addImm(MaxAlign - 1))); 5614} 5615 5616/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5617/// registers the function context. 
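/// The dispatch block's address is written into the pc slot of the jump
/// buffer (&jbuf[1]) so that the SjLj unwinder resumes at DispatchBB.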
5618void ARMTargetLowering:: 5619SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5620 MachineBasicBlock *DispatchBB, int FI) const { 5621 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5622 DebugLoc dl = MI->getDebugLoc(); 5623 MachineFunction *MF = MBB->getParent(); 5624 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5625 MachineConstantPool *MCP = MF->getConstantPool(); 5626 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5627 const Function *F = MF->getFunction(); 5628 5629 bool isThumb = Subtarget->isThumb(); 5630 bool isThumb2 = Subtarget->isThumb2(); 5631 5632 unsigned PCLabelId = AFI->createPICLabelUId(); 5633 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5634 ARMConstantPoolValue *CPV = 5635 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5636 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5637 5638 const TargetRegisterClass *TRC = 5639 isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5640 5641 // Grab constant pool and fixed stack memory operands. 5642 MachineMemOperand *CPMMO = 5643 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5644 MachineMemOperand::MOLoad, 4, 4); 5645 5646 MachineMemOperand *FIMMOSt = 5647 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5648 MachineMemOperand::MOStore, 4, 4); 5649 5650 EmitBasePointerRecalculation(MI, MBB, DispatchBB); 5651 5652 // Load the address of the dispatch MBB into the jump buffer. 5653 if (isThumb2) { 5654 // Incoming value: jbuf 5655 // ldr.n r5, LCPI1_1 5656 // orr r5, r5, #1 5657 // add r5, pc 5658 // str r5, [$jbuf, #+4] ; &jbuf[1] 5659 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5660 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 5661 .addConstantPoolIndex(CPI) 5662 .addMemOperand(CPMMO)); 5663 // Set the low bit because of thumb mode. 5664 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5665 AddDefaultCC( 5666 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 5667 .addReg(NewVReg1, RegState::Kill) 5668 .addImm(0x01))); 5669 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5670 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 5671 .addReg(NewVReg2, RegState::Kill) 5672 .addImm(PCLabelId); 5673 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 5674 .addReg(NewVReg3, RegState::Kill) 5675 .addFrameIndex(FI) 5676 .addImm(36) // &jbuf[1] :: pc 5677 .addMemOperand(FIMMOSt)); 5678 } else if (isThumb) { 5679 // Incoming value: jbuf 5680 // ldr.n r1, LCPI1_4 5681 // add r1, pc 5682 // mov r2, #1 5683 // orrs r1, r2 5684 // add r2, $jbuf, #+4 ; &jbuf[1] 5685 // str r1, [r2] 5686 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5687 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 5688 .addConstantPoolIndex(CPI) 5689 .addMemOperand(CPMMO)); 5690 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5691 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 5692 .addReg(NewVReg1, RegState::Kill) 5693 .addImm(PCLabelId); 5694 // Set the low bit because of thumb mode. 
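    // (Thumb1 has no ORR-with-immediate, so the constant 1 is first
    // materialized in a register and then ORRed in.)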
5695 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5696 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 5697 .addReg(ARM::CPSR, RegState::Define) 5698 .addImm(1)); 5699 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5700 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 5701 .addReg(ARM::CPSR, RegState::Define) 5702 .addReg(NewVReg2, RegState::Kill) 5703 .addReg(NewVReg3, RegState::Kill)); 5704 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5705 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 5706 .addFrameIndex(FI) 5707 .addImm(36)); // &jbuf[1] :: pc 5708 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 5709 .addReg(NewVReg4, RegState::Kill) 5710 .addReg(NewVReg5, RegState::Kill) 5711 .addImm(0) 5712 .addMemOperand(FIMMOSt)); 5713 } else { 5714 // Incoming value: jbuf 5715 // ldr r1, LCPI1_1 5716 // add r1, pc, r1 5717 // str r1, [$jbuf, #+4] ; &jbuf[1] 5718 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5719 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 5720 .addConstantPoolIndex(CPI) 5721 .addImm(0) 5722 .addMemOperand(CPMMO)); 5723 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5724 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 5725 .addReg(NewVReg1, RegState::Kill) 5726 .addImm(PCLabelId)); 5727 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 5728 .addReg(NewVReg2, RegState::Kill) 5729 .addFrameIndex(FI) 5730 .addImm(36) // &jbuf[1] :: pc 5731 .addMemOperand(FIMMOSt)); 5732 } 5733} 5734 5735MachineBasicBlock *ARMTargetLowering:: 5736EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 5737 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5738 DebugLoc dl = MI->getDebugLoc(); 5739 MachineFunction *MF = MBB->getParent(); 5740 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5741 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5742 MachineFrameInfo *MFI = MF->getFrameInfo(); 5743 int FI = MFI->getFunctionContextIndex(); 5744 5745 const TargetRegisterClass *TRC = 5746 Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5747 5748 // Get a mapping of the call site numbers to all of the landing pads they're 5749 // associated with. 5750 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 5751 unsigned MaxCSNum = 0; 5752 MachineModuleInfo &MMI = MF->getMMI(); 5753 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) { 5754 if (!BB->isLandingPad()) continue; 5755 5756 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 5757 // pad. 5758 for (MachineBasicBlock::iterator 5759 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 5760 if (!II->isEHLabel()) continue; 5761 5762 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 5763 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 5764 5765 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 5766 for (SmallVectorImpl<unsigned>::iterator 5767 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 5768 CSI != CSE; ++CSI) { 5769 CallSiteNumToLPad[*CSI].push_back(BB); 5770 MaxCSNum = std::max(MaxCSNum, *CSI); 5771 } 5772 break; 5773 } 5774 } 5775 5776 // Get an ordered list of the machine basic blocks for the jump table. 
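  // Call-site indices are 1-based, so walk 1..MaxCSNum and append the landing
  // pads in call-site order; their predecessors are the invoke blocks whose
  // edges are rewritten to the dispatch block below.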
5777 std::vector<MachineBasicBlock*> LPadList; 5778 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 5779 LPadList.reserve(CallSiteNumToLPad.size()); 5780 for (unsigned I = 1; I <= MaxCSNum; ++I) { 5781 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 5782 for (SmallVectorImpl<MachineBasicBlock*>::iterator 5783 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 5784 LPadList.push_back(*II); 5785 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 5786 } 5787 } 5788 5789 assert(!LPadList.empty() && 5790 "No landing pad destinations for the dispatch jump table!"); 5791 5792 // Create the jump table and associated information. 5793 MachineJumpTableInfo *JTI = 5794 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 5795 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 5796 unsigned UId = AFI->createJumpTableUId(); 5797 5798 // Create the MBBs for the dispatch code. 5799 5800 // Shove the dispatch's address into the return slot in the function context. 5801 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 5802 DispatchBB->setIsLandingPad(); 5803 5804 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 5805 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 5806 DispatchBB->addSuccessor(TrapBB); 5807 5808 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 5809 DispatchBB->addSuccessor(DispContBB); 5810 5811 // Insert and MBBs. 5812 MF->insert(MF->end(), DispatchBB); 5813 MF->insert(MF->end(), DispContBB); 5814 MF->insert(MF->end(), TrapBB); 5815 5816 // Insert code into the entry block that creates and registers the function 5817 // context. 5818 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 5819 5820 MachineMemOperand *FIMMOLd = 5821 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5822 MachineMemOperand::MOLoad | 5823 MachineMemOperand::MOVolatile, 4, 4); 5824 5825 unsigned NumLPads = LPadList.size(); 5826 if (Subtarget->isThumb2()) { 5827 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5828 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 5829 .addFrameIndex(FI) 5830 .addImm(4) 5831 .addMemOperand(FIMMOLd)); 5832 5833 if (NumLPads < 256) { 5834 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 5835 .addReg(NewVReg1) 5836 .addImm(LPadList.size())); 5837 } else { 5838 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5839 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 5840 .addImm(NumLPads & 0xFFFF)); 5841 5842 unsigned VReg2 = VReg1; 5843 if ((NumLPads & 0xFFFF0000) != 0) { 5844 VReg2 = MRI->createVirtualRegister(TRC); 5845 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 5846 .addReg(VReg1) 5847 .addImm(NumLPads >> 16)); 5848 } 5849 5850 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 5851 .addReg(NewVReg1) 5852 .addReg(VReg2)); 5853 } 5854 5855 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 5856 .addMBB(TrapBB) 5857 .addImm(ARMCC::HI) 5858 .addReg(ARM::CPSR); 5859 5860 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5861 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 5862 .addJumpTableIndex(MJTI) 5863 .addImm(UId)); 5864 5865 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5866 AddDefaultCC( 5867 AddDefaultPred( 5868 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 5869 .addReg(NewVReg3, RegState::Kill) 5870 .addReg(NewVReg1) 5871 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5872 5873 BuildMI(DispContBB, dl, 
TII->get(ARM::t2BR_JT)) 5874 .addReg(NewVReg4, RegState::Kill) 5875 .addReg(NewVReg1) 5876 .addJumpTableIndex(MJTI) 5877 .addImm(UId); 5878 } else if (Subtarget->isThumb()) { 5879 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5880 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 5881 .addFrameIndex(FI) 5882 .addImm(1) 5883 .addMemOperand(FIMMOLd)); 5884 5885 if (NumLPads < 256) { 5886 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 5887 .addReg(NewVReg1) 5888 .addImm(NumLPads)); 5889 } else { 5890 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5891 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5892 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5893 5894 // MachineConstantPool wants an explicit alignment. 5895 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5896 if (Align == 0) 5897 Align = getTargetData()->getTypeAllocSize(C->getType()); 5898 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5899 5900 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5901 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 5902 .addReg(VReg1, RegState::Define) 5903 .addConstantPoolIndex(Idx)); 5904 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 5905 .addReg(NewVReg1) 5906 .addReg(VReg1)); 5907 } 5908 5909 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 5910 .addMBB(TrapBB) 5911 .addImm(ARMCC::HI) 5912 .addReg(ARM::CPSR); 5913 5914 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5915 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 5916 .addReg(ARM::CPSR, RegState::Define) 5917 .addReg(NewVReg1) 5918 .addImm(2)); 5919 5920 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5921 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 5922 .addJumpTableIndex(MJTI) 5923 .addImm(UId)); 5924 5925 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5926 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 5927 .addReg(ARM::CPSR, RegState::Define) 5928 .addReg(NewVReg2, RegState::Kill) 5929 .addReg(NewVReg3)); 5930 5931 MachineMemOperand *JTMMOLd = 5932 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5933 MachineMemOperand::MOLoad, 4, 4); 5934 5935 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5936 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 5937 .addReg(NewVReg4, RegState::Kill) 5938 .addImm(0) 5939 .addMemOperand(JTMMOLd)); 5940 5941 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 5942 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 5943 .addReg(ARM::CPSR, RegState::Define) 5944 .addReg(NewVReg5, RegState::Kill) 5945 .addReg(NewVReg3)); 5946 5947 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 5948 .addReg(NewVReg6, RegState::Kill) 5949 .addJumpTableIndex(MJTI) 5950 .addImm(UId); 5951 } else { 5952 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5953 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 5954 .addFrameIndex(FI) 5955 .addImm(4) 5956 .addMemOperand(FIMMOLd)); 5957 5958 if (NumLPads < 256) { 5959 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 5960 .addReg(NewVReg1) 5961 .addImm(NumLPads)); 5962 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 5963 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5964 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 5965 .addImm(NumLPads & 0xFFFF)); 5966 5967 unsigned VReg2 = VReg1; 5968 if ((NumLPads & 0xFFFF0000) 
!= 0) { 5969 VReg2 = MRI->createVirtualRegister(TRC); 5970 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 5971 .addReg(VReg1) 5972 .addImm(NumLPads >> 16)); 5973 } 5974 5975 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5976 .addReg(NewVReg1) 5977 .addReg(VReg2)); 5978 } else { 5979 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5980 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5981 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5982 5983 // MachineConstantPool wants an explicit alignment. 5984 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5985 if (Align == 0) 5986 Align = getTargetData()->getTypeAllocSize(C->getType()); 5987 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5988 5989 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5990 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 5991 .addReg(VReg1, RegState::Define) 5992 .addConstantPoolIndex(Idx) 5993 .addImm(0)); 5994 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5995 .addReg(NewVReg1) 5996 .addReg(VReg1, RegState::Kill)); 5997 } 5998 5999 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 6000 .addMBB(TrapBB) 6001 .addImm(ARMCC::HI) 6002 .addReg(ARM::CPSR); 6003 6004 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6005 AddDefaultCC( 6006 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 6007 .addReg(NewVReg1) 6008 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6009 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6010 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 6011 .addJumpTableIndex(MJTI) 6012 .addImm(UId)); 6013 6014 MachineMemOperand *JTMMOLd = 6015 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6016 MachineMemOperand::MOLoad, 4, 4); 6017 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6018 AddDefaultPred( 6019 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 6020 .addReg(NewVReg3, RegState::Kill) 6021 .addReg(NewVReg4) 6022 .addImm(0) 6023 .addMemOperand(JTMMOLd)); 6024 6025 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 6026 .addReg(NewVReg5, RegState::Kill) 6027 .addReg(NewVReg4) 6028 .addJumpTableIndex(MJTI) 6029 .addImm(UId); 6030 } 6031 6032 // Add the jump table entries as successors to the MBB. 6033 MachineBasicBlock *PrevMBB = 0; 6034 for (std::vector<MachineBasicBlock*>::iterator 6035 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 6036 MachineBasicBlock *CurMBB = *I; 6037 if (PrevMBB != CurMBB) 6038 DispContBB->addSuccessor(CurMBB); 6039 PrevMBB = CurMBB; 6040 } 6041 6042 // N.B. the order the invoke BBs are processed in doesn't matter here. 6043 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6044 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6045 const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF); 6046 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6047 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6048 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6049 MachineBasicBlock *BB = *I; 6050 6051 // Remove the landing pad successor from the invoke block and replace it 6052 // with the new dispatch block. 
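// The detached landing pads are also remembered in MBBLPads so that their
// landing-pad flag can be cleared once the dispatch block has taken over that
// role (see the loop after this one).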
6053 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6054 BB->succ_end()); 6055 while (!Successors.empty()) { 6056 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6057 if (SMBB->isLandingPad()) { 6058 BB->removeSuccessor(SMBB); 6059 MBBLPads.push_back(SMBB); 6060 } 6061 } 6062 6063 BB->addSuccessor(DispatchBB); 6064 6065 // Find the invoke call and mark all of the callee-saved registers as 6066 // 'implicit defined' so that they're spilled. This prevents code from 6067 // moving instructions to before the EH block, where they will never be 6068 // executed. 6069 for (MachineBasicBlock::reverse_iterator 6070 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6071 if (!II->getDesc().isCall()) continue; 6072 6073 DenseMap<unsigned, bool> DefRegs; 6074 for (MachineInstr::mop_iterator 6075 OI = II->operands_begin(), OE = II->operands_end(); 6076 OI != OE; ++OI) { 6077 if (!OI->isReg()) continue; 6078 DefRegs[OI->getReg()] = true; 6079 } 6080 6081 MachineInstrBuilder MIB(&*II); 6082 6083 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6084 unsigned Reg = SavedRegs[i]; 6085 if (Subtarget->isThumb2() && 6086 !ARM::tGPRRegisterClass->contains(Reg) && 6087 !ARM::hGPRRegisterClass->contains(Reg)) 6088 continue; 6089 else if (Subtarget->isThumb1Only() && 6090 !ARM::tGPRRegisterClass->contains(Reg)) 6091 continue; 6092 else if (!Subtarget->isThumb() && 6093 !ARM::GPRRegisterClass->contains(Reg)) 6094 continue; 6095 if (!DefRegs[Reg]) 6096 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6097 } 6098 6099 break; 6100 } 6101 } 6102 6103 // Mark all former landing pads as non-landing pads. The dispatch is the only 6104 // landing pad now. 6105 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6106 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6107 (*I)->setIsLandingPad(false); 6108 6109 // The instruction is gone now. 6110 MI->eraseFromParent(); 6111 6112 return MBB; 6113} 6114 6115static 6116MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6117 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6118 E = MBB->succ_end(); I != E; ++I) 6119 if (*I != Succ) 6120 return *I; 6121 llvm_unreachable("Expecting a BB with two successors!"); 6122} 6123 6124MachineBasicBlock * 6125ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6126 MachineBasicBlock *BB) const { 6127 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6128 DebugLoc dl = MI->getDebugLoc(); 6129 bool isThumb2 = Subtarget->isThumb2(); 6130 switch (MI->getOpcode()) { 6131 default: { 6132 MI->dump(); 6133 llvm_unreachable("Unexpected instr type to insert"); 6134 } 6135 // The Thumb2 pre-indexed stores have the same MI operands, they just 6136 // define them differently in the .td files from the isel patterns, so 6137 // they need pseudos. 6138 case ARM::t2STR_preidx: 6139 MI->setDesc(TII->get(ARM::t2STR_PRE)); 6140 return BB; 6141 case ARM::t2STRB_preidx: 6142 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 6143 return BB; 6144 case ARM::t2STRH_preidx: 6145 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 6146 return BB; 6147 6148 case ARM::STRi_preidx: 6149 case ARM::STRBi_preidx: { 6150 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 6151 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 6152 // Decode the offset. 
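// The addrmode2 immediate packs both the magnitude and the add/sub direction;
// getAM2Offset() recovers the magnitude and getAM2Op() the direction, which is
// re-applied below by negating the offset for a subtracting form.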
6153 unsigned Offset = MI->getOperand(4).getImm(); 6154 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 6155 Offset = ARM_AM::getAM2Offset(Offset); 6156 if (isSub) 6157 Offset = -Offset; 6158 6159 MachineMemOperand *MMO = *MI->memoperands_begin(); 6160 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 6161 .addOperand(MI->getOperand(0)) // Rn_wb 6162 .addOperand(MI->getOperand(1)) // Rt 6163 .addOperand(MI->getOperand(2)) // Rn 6164 .addImm(Offset) // offset (skip GPR==zero_reg) 6165 .addOperand(MI->getOperand(5)) // pred 6166 .addOperand(MI->getOperand(6)) 6167 .addMemOperand(MMO); 6168 MI->eraseFromParent(); 6169 return BB; 6170 } 6171 case ARM::STRr_preidx: 6172 case ARM::STRBr_preidx: 6173 case ARM::STRH_preidx: { 6174 unsigned NewOpc; 6175 switch (MI->getOpcode()) { 6176 default: llvm_unreachable("unexpected opcode!"); 6177 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 6178 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 6179 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 6180 } 6181 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 6182 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 6183 MIB.addOperand(MI->getOperand(i)); 6184 MI->eraseFromParent(); 6185 return BB; 6186 } 6187 case ARM::ATOMIC_LOAD_ADD_I8: 6188 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6189 case ARM::ATOMIC_LOAD_ADD_I16: 6190 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6191 case ARM::ATOMIC_LOAD_ADD_I32: 6192 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6193 6194 case ARM::ATOMIC_LOAD_AND_I8: 6195 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6196 case ARM::ATOMIC_LOAD_AND_I16: 6197 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6198 case ARM::ATOMIC_LOAD_AND_I32: 6199 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6200 6201 case ARM::ATOMIC_LOAD_OR_I8: 6202 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6203 case ARM::ATOMIC_LOAD_OR_I16: 6204 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6205 case ARM::ATOMIC_LOAD_OR_I32: 6206 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6207 6208 case ARM::ATOMIC_LOAD_XOR_I8: 6209 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6210 case ARM::ATOMIC_LOAD_XOR_I16: 6211 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6212 case ARM::ATOMIC_LOAD_XOR_I32: 6213 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6214 6215 case ARM::ATOMIC_LOAD_NAND_I8: 6216 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6217 case ARM::ATOMIC_LOAD_NAND_I16: 6218 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6219 case ARM::ATOMIC_LOAD_NAND_I32: 6220 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6221 6222 case ARM::ATOMIC_LOAD_SUB_I8: 6223 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6224 case ARM::ATOMIC_LOAD_SUB_I16: 6225 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6226 case ARM::ATOMIC_LOAD_SUB_I32: 6227 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 6228 6229 case ARM::ATOMIC_LOAD_MIN_I8: 6230 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 6231 case ARM::ATOMIC_LOAD_MIN_I16: 6232 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 6233 case ARM::ATOMIC_LOAD_MIN_I32: 6234 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 6235 6236 case ARM::ATOMIC_LOAD_MAX_I8: 6237 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 6238 case ARM::ATOMIC_LOAD_MAX_I16: 6239 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 6240 case ARM::ATOMIC_LOAD_MAX_I32: 6241 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 6242 6243 case ARM::ATOMIC_LOAD_UMIN_I8: 6244 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 6245 case ARM::ATOMIC_LOAD_UMIN_I16: 6246 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 6247 case ARM::ATOMIC_LOAD_UMIN_I32: 6248 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 6249 6250 case ARM::ATOMIC_LOAD_UMAX_I8: 6251 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 6252 case ARM::ATOMIC_LOAD_UMAX_I16: 6253 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 6254 case ARM::ATOMIC_LOAD_UMAX_I32: 6255 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 6256 6257 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 6258 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 6259 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 6260 6261 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 6262 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 6263 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 6264 6265 6266 case ARM::ATOMADD6432: 6267 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 6268 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 6269 /*NeedsCarry*/ true); 6270 case ARM::ATOMSUB6432: 6271 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6272 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6273 /*NeedsCarry*/ true); 6274 case ARM::ATOMOR6432: 6275 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 6276 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6277 case ARM::ATOMXOR6432: 6278 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 6279 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6280 case ARM::ATOMAND6432: 6281 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 6282 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6283 case ARM::ATOMSWAP6432: 6284 return EmitAtomicBinary64(MI, BB, 0, 0, false); 6285 case ARM::ATOMCMPXCHG6432: 6286 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6287 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6288 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6289 6290 case ARM::tMOVCCr_pseudo: { 6291 // To "insert" a SELECT_CC instruction, we actually have to insert the 6292 // diamond control-flow pattern. The incoming instruction knows the 6293 // destination vreg to set, the condition code register to branch on, the 6294 // true/false values to select between, and a branch opcode to use. 6295 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6296 MachineFunction::iterator It = BB; 6297 ++It; 6298 6299 // thisMBB: 6300 // ... 6301 // TrueVal = ... 
6302 // cmpTY ccX, r1, r2 6303 // bCC copy1MBB 6304 // fallthrough --> copy0MBB 6305 MachineBasicBlock *thisMBB = BB; 6306 MachineFunction *F = BB->getParent(); 6307 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6308 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6309 F->insert(It, copy0MBB); 6310 F->insert(It, sinkMBB); 6311 6312 // Transfer the remainder of BB and its successor edges to sinkMBB. 6313 sinkMBB->splice(sinkMBB->begin(), BB, 6314 llvm::next(MachineBasicBlock::iterator(MI)), 6315 BB->end()); 6316 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6317 6318 BB->addSuccessor(copy0MBB); 6319 BB->addSuccessor(sinkMBB); 6320 6321 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6322 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6323 6324 // copy0MBB: 6325 // %FalseValue = ... 6326 // # fallthrough to sinkMBB 6327 BB = copy0MBB; 6328 6329 // Update machine-CFG edges 6330 BB->addSuccessor(sinkMBB); 6331 6332 // sinkMBB: 6333 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6334 // ... 6335 BB = sinkMBB; 6336 BuildMI(*BB, BB->begin(), dl, 6337 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 6338 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6339 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6340 6341 MI->eraseFromParent(); // The pseudo instruction is gone now. 6342 return BB; 6343 } 6344 6345 case ARM::BCCi64: 6346 case ARM::BCCZi64: { 6347 // If there is an unconditional branch to the other successor, remove it. 6348 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 6349 6350 // Compare both parts that make up the double comparison separately for 6351 // equality. 6352 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 6353 6354 unsigned LHS1 = MI->getOperand(1).getReg(); 6355 unsigned LHS2 = MI->getOperand(2).getReg(); 6356 if (RHSisZero) { 6357 AddDefaultPred(BuildMI(BB, dl, 6358 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6359 .addReg(LHS1).addImm(0)); 6360 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6361 .addReg(LHS2).addImm(0) 6362 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6363 } else { 6364 unsigned RHS1 = MI->getOperand(3).getReg(); 6365 unsigned RHS2 = MI->getOperand(4).getReg(); 6366 AddDefaultPred(BuildMI(BB, dl, 6367 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6368 .addReg(LHS1).addReg(RHS1)); 6369 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6370 .addReg(LHS2).addReg(RHS2) 6371 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6372 } 6373 6374 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 6375 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 6376 if (MI->getOperand(0).getImm() == ARMCC::NE) 6377 std::swap(destMBB, exitMBB); 6378 6379 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6380 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 6381 if (isThumb2) 6382 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 6383 else 6384 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 6385 6386 MI->eraseFromParent(); // The pseudo instruction is gone now. 6387 return BB; 6388 } 6389 6390 case ARM::Int_eh_sjlj_setjmp: 6391 case ARM::Int_eh_sjlj_setjmp_nofp: 6392 case ARM::tInt_eh_sjlj_setjmp: 6393 case ARM::t2Int_eh_sjlj_setjmp: 6394 case ARM::t2Int_eh_sjlj_setjmp_nofp: 6395 EmitSjLjDispatchBlock(MI, BB); 6396 return BB; 6397 6398 case ARM::ABS: 6399 case ARM::t2ABS: { 6400 // To insert an ABS instruction, we have to insert the 6401 // diamond control-flow pattern. 
The incoming instruction knows the 6402 // source vreg to test against 0, the destination vreg to set, 6403 // the condition code register to branch on, the 6404 // true/false values to select between, and a branch opcode to use. 6405 // It transforms 6406 // V1 = ABS V0 6407 // into 6408 // V2 = MOVS V0 6409 // BCC (branch to SinkBB if V0 >= 0) 6410 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 6411 // SinkBB: V1 = PHI(V2, V3) 6412 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6413 MachineFunction::iterator BBI = BB; 6414 ++BBI; 6415 MachineFunction *Fn = BB->getParent(); 6416 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6417 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6418 Fn->insert(BBI, RSBBB); 6419 Fn->insert(BBI, SinkBB); 6420 6421 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 6422 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 6423 bool isThumb2 = Subtarget->isThumb2(); 6424 MachineRegisterInfo &MRI = Fn->getRegInfo(); 6425 // In Thumb mode S must not be specified if source register is the SP or 6426 // PC and if destination register is the SP, so restrict register class 6427 unsigned NewMovDstReg = MRI.createVirtualRegister( 6428 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6429 unsigned NewRsbDstReg = MRI.createVirtualRegister( 6430 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6431 6432 // Transfer the remainder of BB and its successor edges to sinkMBB. 6433 SinkBB->splice(SinkBB->begin(), BB, 6434 llvm::next(MachineBasicBlock::iterator(MI)), 6435 BB->end()); 6436 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 6437 6438 BB->addSuccessor(RSBBB); 6439 BB->addSuccessor(SinkBB); 6440 6441 // fall through to SinkMBB 6442 RSBBB->addSuccessor(SinkBB); 6443 6444 // insert a movs at the end of BB 6445 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr), 6446 NewMovDstReg) 6447 .addReg(ABSSrcReg, RegState::Kill) 6448 .addImm((unsigned)ARMCC::AL).addReg(0) 6449 .addReg(ARM::CPSR, RegState::Define); 6450 6451 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 6452 BuildMI(BB, dl, 6453 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 6454 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 6455 6456 // insert rsbri in RSBBB 6457 // Note: BCC and rsbri will be converted into predicated rsbmi 6458 // by if-conversion pass 6459 BuildMI(*RSBBB, RSBBB->begin(), dl, 6460 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 6461 .addReg(NewMovDstReg, RegState::Kill) 6462 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 6463 6464 // insert PHI in SinkBB, 6465 // reuse ABSDstReg to not change uses of ABS instruction 6466 BuildMI(*SinkBB, SinkBB->begin(), dl, 6467 TII->get(ARM::PHI), ABSDstReg) 6468 .addReg(NewRsbDstReg).addMBB(RSBBB) 6469 .addReg(NewMovDstReg).addMBB(BB); 6470 6471 // remove ABS instruction 6472 MI->eraseFromParent(); 6473 6474 // return last added BB 6475 return SinkBB; 6476 } 6477 } 6478} 6479 6480void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 6481 SDNode *Node) const { 6482 const MCInstrDesc *MCID = &MI->getDesc(); 6483 if (!MCID->hasPostISelHook()) { 6484 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 6485 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 6486 return; 6487 } 6488 6489 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 6490 // RSC. 
Coming out of isel, they have an implicit CPSR def, but the optional 6491 // operand is still set to noreg. If needed, set the optional operand's 6492 // register to CPSR, and remove the redundant implicit def. 6493 // 6494 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 6495 6496 // Rename pseudo opcodes. 6497 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 6498 if (NewOpc) { 6499 const ARMBaseInstrInfo *TII = 6500 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 6501 MCID = &TII->get(NewOpc); 6502 6503 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 6504 "converted opcode should be the same except for cc_out"); 6505 6506 MI->setDesc(*MCID); 6507 6508 // Add the optional cc_out operand 6509 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 6510 } 6511 unsigned ccOutIdx = MCID->getNumOperands() - 1; 6512 6513 // Any ARM instruction that sets the 's' bit should specify an optional 6514 // "cc_out" operand in the last operand position. 6515 if (!MCID->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 6516 assert(!NewOpc && "Optional cc_out operand required"); 6517 return; 6518 } 6519 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 6520 // since we already have an optional CPSR def. 6521 bool definesCPSR = false; 6522 bool deadCPSR = false; 6523 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 6524 i != e; ++i) { 6525 const MachineOperand &MO = MI->getOperand(i); 6526 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 6527 definesCPSR = true; 6528 if (MO.isDead()) 6529 deadCPSR = true; 6530 MI->RemoveOperand(i); 6531 break; 6532 } 6533 } 6534 if (!definesCPSR) { 6535 assert(!NewOpc && "Optional cc_out operand required"); 6536 return; 6537 } 6538 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 6539 if (deadCPSR) { 6540 assert(!MI->getOperand(ccOutIdx).getReg() && 6541 "expect uninitialized optional cc_out operand"); 6542 return; 6543 } 6544 6545 // If this instruction was defined with an optional CPSR def and its dag node 6546 // had a live implicit CPSR def, then activate the optional CPSR def. 6547 MachineOperand &MO = MI->getOperand(ccOutIdx); 6548 MO.setReg(ARM::CPSR); 6549 MO.setIsDef(true); 6550} 6551 6552//===----------------------------------------------------------------------===// 6553// ARM Optimization Hooks 6554//===----------------------------------------------------------------------===// 6555 6556static 6557SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 6558 TargetLowering::DAGCombinerInfo &DCI) { 6559 SelectionDAG &DAG = DCI.DAG; 6560 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6561 EVT VT = N->getValueType(0); 6562 unsigned Opc = N->getOpcode(); 6563 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 6564 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 6565 SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2); 6566 ISD::CondCode CC = ISD::SETCC_INVALID; 6567 6568 if (isSlctCC) { 6569 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 6570 } else { 6571 SDValue CCOp = Slct.getOperand(0); 6572 if (CCOp.getOpcode() == ISD::SETCC) 6573 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 6574 } 6575 6576 bool DoXform = false; 6577 bool InvCC = false; 6578 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 6579 "Bad input!"); 6580 6581 if (LHS.getOpcode() == ISD::Constant && 6582 cast<ConstantSDNode>(LHS)->isNullValue()) { 6583 DoXform = true; 6584 } else if (CC != ISD::SETCC_INVALID && 6585 RHS.getOpcode() == ISD::Constant && 6586 cast<ConstantSDNode>(RHS)->isNullValue()) { 6587 std::swap(LHS, RHS); 6588 SDValue Op0 = Slct.getOperand(0); 6589 EVT OpVT = isSlctCC ? Op0.getValueType() : 6590 Op0.getOperand(0).getValueType(); 6591 bool isInt = OpVT.isInteger(); 6592 CC = ISD::getSetCCInverse(CC, isInt); 6593 6594 if (!TLI.isCondCodeLegal(CC, OpVT)) 6595 return SDValue(); // Inverse operator isn't legal. 6596 6597 DoXform = true; 6598 InvCC = true; 6599 } 6600 6601 if (DoXform) { 6602 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 6603 if (isSlctCC) 6604 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 6605 Slct.getOperand(0), Slct.getOperand(1), CC); 6606 SDValue CCOp = Slct.getOperand(0); 6607 if (InvCC) 6608 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 6609 CCOp.getOperand(0), CCOp.getOperand(1), CC); 6610 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 6611 CCOp, OtherOp, Result); 6612 } 6613 return SDValue(); 6614} 6615 6616// AddCombineToVPADDL - For pair-wise add on NEON, use the vpaddl instruction 6617// (only after legalization). 6618static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 6619 TargetLowering::DAGCombinerInfo &DCI, 6620 const ARMSubtarget *Subtarget) { 6621 6622 // Only perform the optimization after legalization and when NEON is available. We 6623 // also expect both operands to be BUILD_VECTORs. 6624 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 6625 || N0.getOpcode() != ISD::BUILD_VECTOR 6626 || N1.getOpcode() != ISD::BUILD_VECTOR) 6627 return SDValue(); 6628 6629 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 6630 EVT VT = N->getValueType(0); 6631 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 6632 return SDValue(); 6633 6634 // Check that the vector operands are of the right form. 6635 // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT 6636 // operands, where N is the number of elements in the formed vector. 6637 // Each EXTRACT_VECTOR_ELT should have the same input vector and an odd or even 6638 // index such that we have a pairwise add pattern. 6639 6640 // Grab the vector that all EXTRACT_VECTOR_ELT nodes should be referencing. 6641 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6642 return SDValue(); 6643 SDValue Vec = N0->getOperand(0)->getOperand(0); 6644 SDNode *V = Vec.getNode(); 6645 unsigned nextIndex = 0; 6646 6647 // For each operand of the ADD that is a BUILD_VECTOR, 6648 // check that each of its operands is an EXTRACT_VECTOR_ELT with 6649 // the same input vector and the appropriate index.
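// Illustrative example (not from the original source): for a <4 x i32> input
// vector 'a' and a <2 x i32> result, the pattern being matched is
//   N0 = build_vector a[0], a[2]   (even lanes)
//   N1 = build_vector a[1], a[3]   (odd lanes)
// so N0 + N1 is the pairwise sum that vpaddl produces (in widened form,
// truncated back to the result type below).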
6650 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 6651 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 6652 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6653 6654 SDValue ExtVec0 = N0->getOperand(i); 6655 SDValue ExtVec1 = N1->getOperand(i); 6656 6657 // First operand is the vector; verify it's the same. 6658 if (V != ExtVec0->getOperand(0).getNode() || 6659 V != ExtVec1->getOperand(0).getNode()) 6660 return SDValue(); 6661 6662 // Second is the constant lane index; verify it's correct. 6663 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 6664 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 6665 6666 // For the constants, N0 should hold the even indices and N1 the odd ones. 6667 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 6668 || C1->getZExtValue() != nextIndex+1) 6669 return SDValue(); 6670 6671 // Increment index. 6672 nextIndex+=2; 6673 } else 6674 return SDValue(); 6675 } 6676 6677 // Create VPADDL node. 6678 SelectionDAG &DAG = DCI.DAG; 6679 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6680 6681 // Build operand list. 6682 SmallVector<SDValue, 8> Ops; 6683 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 6684 TLI.getPointerTy())); 6685 6686 // Input is the vector. 6687 Ops.push_back(Vec); 6688 6689 // Get the widened vector type for the vpaddl result. 6690 MVT widenType; 6691 unsigned numElem = VT.getVectorNumElements(); 6692 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 6693 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 6694 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 6695 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 6696 default: 6697 assert(0 && "Invalid vector element type for padd optimization."); 6698 } 6699 6700 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 6701 widenType, &Ops[0], Ops.size()); 6702 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 6703} 6704 6705/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 6706/// operands N0 and N1. This is a helper for PerformADDCombine that is 6707/// called with the default operands, and if that fails, with commuted 6708/// operands. 6709static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 6710 TargetLowering::DAGCombinerInfo &DCI, 6711 const ARMSubtarget *Subtarget){ 6712 6713 // Attempt to create vpaddl for this add. 6714 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 6715 if (Result.getNode()) 6716 return Result; 6717 6718 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c)) 6719 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 6720 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 6721 if (Result.getNode()) return Result; 6722 } 6723 return SDValue(); 6724} 6725 6726/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 6727/// 6728static SDValue PerformADDCombine(SDNode *N, 6729 TargetLowering::DAGCombinerInfo &DCI, 6730 const ARMSubtarget *Subtarget) { 6731 SDValue N0 = N->getOperand(0); 6732 SDValue N1 = N->getOperand(1); 6733 6734 // First try with the default operand order. 6735 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 6736 if (Result.getNode()) 6737 return Result; 6738 6739 // If that didn't work, try again with the operands commuted.
6740 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 6741} 6742 6743/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 6744/// 6745static SDValue PerformSUBCombine(SDNode *N, 6746 TargetLowering::DAGCombinerInfo &DCI) { 6747 SDValue N0 = N->getOperand(0); 6748 SDValue N1 = N->getOperand(1); 6749 6750 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 6751 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 6752 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 6753 if (Result.getNode()) return Result; 6754 } 6755 6756 return SDValue(); 6757} 6758 6759/// PerformVMULCombine 6760/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 6761/// special multiplier accumulator forwarding. 6762/// vmul d3, d0, d2 6763/// vmla d3, d1, d2 6764/// is faster than 6765/// vadd d3, d0, d1 6766/// vmul d3, d3, d2 6767static SDValue PerformVMULCombine(SDNode *N, 6768 TargetLowering::DAGCombinerInfo &DCI, 6769 const ARMSubtarget *Subtarget) { 6770 if (!Subtarget->hasVMLxForwarding()) 6771 return SDValue(); 6772 6773 SelectionDAG &DAG = DCI.DAG; 6774 SDValue N0 = N->getOperand(0); 6775 SDValue N1 = N->getOperand(1); 6776 unsigned Opcode = N0.getOpcode(); 6777 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6778 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 6779 Opcode = N1.getOpcode(); 6780 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6781 Opcode != ISD::FADD && Opcode != ISD::FSUB) 6782 return SDValue(); 6783 std::swap(N0, N1); 6784 } 6785 6786 EVT VT = N->getValueType(0); 6787 DebugLoc DL = N->getDebugLoc(); 6788 SDValue N00 = N0->getOperand(0); 6789 SDValue N01 = N0->getOperand(1); 6790 return DAG.getNode(Opcode, DL, VT, 6791 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 6792 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 6793} 6794 6795static SDValue PerformMULCombine(SDNode *N, 6796 TargetLowering::DAGCombinerInfo &DCI, 6797 const ARMSubtarget *Subtarget) { 6798 SelectionDAG &DAG = DCI.DAG; 6799 6800 if (Subtarget->isThumb1Only()) 6801 return SDValue(); 6802 6803 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6804 return SDValue(); 6805 6806 EVT VT = N->getValueType(0); 6807 if (VT.is64BitVector() || VT.is128BitVector()) 6808 return PerformVMULCombine(N, DCI, Subtarget); 6809 if (VT != MVT::i32) 6810 return SDValue(); 6811 6812 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6813 if (!C) 6814 return SDValue(); 6815 6816 uint64_t MulAmt = C->getZExtValue(); 6817 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 6818 ShiftAmt = ShiftAmt & (32 - 1); 6819 SDValue V = N->getOperand(0); 6820 DebugLoc DL = N->getDebugLoc(); 6821 6822 SDValue Res; 6823 MulAmt >>= ShiftAmt; 6824 if (isPowerOf2_32(MulAmt - 1)) { 6825 // (mul x, 2^N + 1) => (add (shl x, N), x) 6826 Res = DAG.getNode(ISD::ADD, DL, VT, 6827 V, DAG.getNode(ISD::SHL, DL, VT, 6828 V, DAG.getConstant(Log2_32(MulAmt-1), 6829 MVT::i32))); 6830 } else if (isPowerOf2_32(MulAmt + 1)) { 6831 // (mul x, 2^N - 1) => (sub (shl x, N), x) 6832 Res = DAG.getNode(ISD::SUB, DL, VT, 6833 DAG.getNode(ISD::SHL, DL, VT, 6834 V, DAG.getConstant(Log2_32(MulAmt+1), 6835 MVT::i32)), 6836 V); 6837 } else 6838 return SDValue(); 6839 6840 if (ShiftAmt != 0) 6841 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 6842 DAG.getConstant(ShiftAmt, MVT::i32)); 6843 6844 // Do not add new nodes to DAG combiner worklist. 
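// (That is what the 'false' argument to CombineTo below requests.)
// Worked example, not from the original source: MulAmt = 24 gives ShiftAmt = 3
// and a reduced MulAmt of 3 = 2^1 + 1, so Res becomes (x + (x << 1)) and the
// final shift restores the factor of eight: ((x + (x << 1)) << 3) == x * 24.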
6845 DCI.CombineTo(N, Res, false); 6846 return SDValue(); 6847} 6848 6849static SDValue PerformANDCombine(SDNode *N, 6850 TargetLowering::DAGCombinerInfo &DCI) { 6851 6852 // Attempt to use immediate-form VBIC 6853 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6854 DebugLoc dl = N->getDebugLoc(); 6855 EVT VT = N->getValueType(0); 6856 SelectionDAG &DAG = DCI.DAG; 6857 6858 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6859 return SDValue(); 6860 6861 APInt SplatBits, SplatUndef; 6862 unsigned SplatBitSize; 6863 bool HasAnyUndefs; 6864 if (BVN && 6865 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6866 if (SplatBitSize <= 64) { 6867 EVT VbicVT; 6868 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 6869 SplatUndef.getZExtValue(), SplatBitSize, 6870 DAG, VbicVT, VT.is128BitVector(), 6871 OtherModImm); 6872 if (Val.getNode()) { 6873 SDValue Input = 6874 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 6875 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 6876 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 6877 } 6878 } 6879 } 6880 6881 return SDValue(); 6882} 6883 6884/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 6885static SDValue PerformORCombine(SDNode *N, 6886 TargetLowering::DAGCombinerInfo &DCI, 6887 const ARMSubtarget *Subtarget) { 6888 // Attempt to use immediate-form VORR 6889 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6890 DebugLoc dl = N->getDebugLoc(); 6891 EVT VT = N->getValueType(0); 6892 SelectionDAG &DAG = DCI.DAG; 6893 6894 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6895 return SDValue(); 6896 6897 APInt SplatBits, SplatUndef; 6898 unsigned SplatBitSize; 6899 bool HasAnyUndefs; 6900 if (BVN && Subtarget->hasNEON() && 6901 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6902 if (SplatBitSize <= 64) { 6903 EVT VorrVT; 6904 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6905 SplatUndef.getZExtValue(), SplatBitSize, 6906 DAG, VorrVT, VT.is128BitVector(), 6907 OtherModImm); 6908 if (Val.getNode()) { 6909 SDValue Input = 6910 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 6911 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 6912 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 6913 } 6914 } 6915 } 6916 6917 SDValue N0 = N->getOperand(0); 6918 if (N0.getOpcode() != ISD::AND) 6919 return SDValue(); 6920 SDValue N1 = N->getOperand(1); 6921 6922 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 6923 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 6924 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 6925 APInt SplatUndef; 6926 unsigned SplatBitSize; 6927 bool HasAnyUndefs; 6928 6929 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 6930 APInt SplatBits0; 6931 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 6932 HasAnyUndefs) && !HasAnyUndefs) { 6933 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 6934 APInt SplatBits1; 6935 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 6936 HasAnyUndefs) && !HasAnyUndefs && 6937 SplatBits0 == ~SplatBits1) { 6938 // Canonicalize the vector type to make instruction selection simpler. 6939 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 6940 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 6941 N0->getOperand(1), N0->getOperand(0), 6942 N1->getOperand(0)); 6943 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 6944 } 6945 } 6946 } 6947 6948 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 6949 // reasonable. 6950 6951 // BFI is only available on V6T2+ 6952 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 6953 return SDValue(); 6954 6955 DebugLoc DL = N->getDebugLoc(); 6956 // 1) or (and A, mask), val => ARMbfi A, val, mask 6957 // iff (val & mask) == val 6958 // 6959 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6960 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 6961 // && mask == ~mask2 6962 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 6963 // && ~mask == mask2 6964 // (i.e., copy a bitfield value into another bitfield of the same width) 6965 6966 if (VT != MVT::i32) 6967 return SDValue(); 6968 6969 SDValue N00 = N0.getOperand(0); 6970 6971 // The value and the mask need to be constants so we can verify this is 6972 // actually a bitfield set. If the mask is 0xffff, we can do better 6973 // via a movt instruction, so don't use BFI in that case. 6974 SDValue MaskOp = N0.getOperand(1); 6975 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 6976 if (!MaskC) 6977 return SDValue(); 6978 unsigned Mask = MaskC->getZExtValue(); 6979 if (Mask == 0xffff) 6980 return SDValue(); 6981 SDValue Res; 6982 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 6983 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 6984 if (N1C) { 6985 unsigned Val = N1C->getZExtValue(); 6986 if ((Val & ~Mask) != Val) 6987 return SDValue(); 6988 6989 if (ARM::isBitFieldInvertedMask(Mask)) { 6990 Val >>= CountTrailingZeros_32(~Mask); 6991 6992 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 6993 DAG.getConstant(Val, MVT::i32), 6994 DAG.getConstant(Mask, MVT::i32)); 6995 6996 // Do not add new nodes to DAG combiner worklist. 6997 DCI.CombineTo(N, Res, false); 6998 return SDValue(); 6999 } 7000 } else if (N1.getOpcode() == ISD::AND) { 7001 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 7002 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7003 if (!N11C) 7004 return SDValue(); 7005 unsigned Mask2 = N11C->getZExtValue(); 7006 7007 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 7008 // as is to match. 7009 if (ARM::isBitFieldInvertedMask(Mask) && 7010 (Mask == ~Mask2)) { 7011 // The pack halfword instruction works better for masks that fit it, 7012 // so use that when it's available. 7013 if (Subtarget->hasT2ExtractPack() && 7014 (Mask == 0xffff || Mask == 0xffff0000)) 7015 return SDValue(); 7016 // 2a 7017 unsigned amt = CountTrailingZeros_32(Mask2); 7018 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 7019 DAG.getConstant(amt, MVT::i32)); 7020 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 7021 DAG.getConstant(Mask, MVT::i32)); 7022 // Do not add new nodes to DAG combiner worklist. 7023 DCI.CombineTo(N, Res, false); 7024 return SDValue(); 7025 } else if (ARM::isBitFieldInvertedMask(~Mask) && 7026 (~Mask == Mask2)) { 7027 // The pack halfword instruction works better for masks that fit it, 7028 // so use that when it's available. 
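// (Presumably the 0xffff / 0xffff0000 masks checked below are the halfword
// cases the pack instructions cover, which is why BFI is skipped for them.)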
7029 if (Subtarget->hasT2ExtractPack() && 7030 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 7031 return SDValue(); 7032 // 2b 7033 unsigned lsb = CountTrailingZeros_32(Mask); 7034 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 7035 DAG.getConstant(lsb, MVT::i32)); 7036 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 7037 DAG.getConstant(Mask2, MVT::i32)); 7038 // Do not add new nodes to DAG combiner worklist. 7039 DCI.CombineTo(N, Res, false); 7040 return SDValue(); 7041 } 7042 } 7043 7044 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 7045 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 7046 ARM::isBitFieldInvertedMask(~Mask)) { 7047 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 7048 // where lsb(mask) == #shamt and masked bits of B are known zero. 7049 SDValue ShAmt = N00.getOperand(1); 7050 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7051 unsigned LSB = CountTrailingZeros_32(Mask); 7052 if (ShAmtC != LSB) 7053 return SDValue(); 7054 7055 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 7056 DAG.getConstant(~Mask, MVT::i32)); 7057 7058 // Do not add new nodes to DAG combiner worklist. 7059 DCI.CombineTo(N, Res, false); 7060 } 7061 7062 return SDValue(); 7063} 7064 7065/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 7066/// the bits being cleared by the AND are not demanded by the BFI. 7067static SDValue PerformBFICombine(SDNode *N, 7068 TargetLowering::DAGCombinerInfo &DCI) { 7069 SDValue N1 = N->getOperand(1); 7070 if (N1.getOpcode() == ISD::AND) { 7071 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7072 if (!N11C) 7073 return SDValue(); 7074 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 7075 unsigned LSB = CountTrailingZeros_32(~InvMask); 7076 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 7077 unsigned Mask = (1 << Width)-1; 7078 unsigned Mask2 = N11C->getZExtValue(); 7079 if ((Mask & (~Mask2)) == 0) 7080 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 7081 N->getOperand(0), N1.getOperand(0), 7082 N->getOperand(2)); 7083 } 7084 return SDValue(); 7085} 7086 7087/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 7088/// ARMISD::VMOVRRD. 7089static SDValue PerformVMOVRRDCombine(SDNode *N, 7090 TargetLowering::DAGCombinerInfo &DCI) { 7091 // vmovrrd(vmovdrr x, y) -> x,y 7092 SDValue InDouble = N->getOperand(0); 7093 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 7094 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 7095 7096 // vmovrrd(load f64) -> (load i32), (load i32) 7097 SDNode *InNode = InDouble.getNode(); 7098 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 7099 InNode->getValueType(0) == MVT::f64 && 7100 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 7101 !cast<LoadSDNode>(InNode)->isVolatile()) { 7102 // TODO: Should this be done for non-FrameIndex operands? 
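// The f64 load is rewritten below as two i32 loads at offsets 0 and +4 from
// the same base pointer, with the second load chained after the first and the
// original load's chain uses redirected to it.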
7103 LoadSDNode *LD = cast<LoadSDNode>(InNode); 7104 7105 SelectionDAG &DAG = DCI.DAG; 7106 DebugLoc DL = LD->getDebugLoc(); 7107 SDValue BasePtr = LD->getBasePtr(); 7108 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 7109 LD->getPointerInfo(), LD->isVolatile(), 7110 LD->isNonTemporal(), LD->isInvariant(), 7111 LD->getAlignment()); 7112 7113 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7114 DAG.getConstant(4, MVT::i32)); 7115 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 7116 LD->getPointerInfo(), LD->isVolatile(), 7117 LD->isNonTemporal(), LD->isInvariant(), 7118 std::min(4U, LD->getAlignment() / 2)); 7119 7120 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 7121 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 7122 DCI.RemoveFromWorklist(LD); 7123 DAG.DeleteNode(LD); 7124 return Result; 7125 } 7126 7127 return SDValue(); 7128} 7129 7130/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 7131/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 7132static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 7133 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 7134 SDValue Op0 = N->getOperand(0); 7135 SDValue Op1 = N->getOperand(1); 7136 if (Op0.getOpcode() == ISD::BITCAST) 7137 Op0 = Op0.getOperand(0); 7138 if (Op1.getOpcode() == ISD::BITCAST) 7139 Op1 = Op1.getOperand(0); 7140 if (Op0.getOpcode() == ARMISD::VMOVRRD && 7141 Op0.getNode() == Op1.getNode() && 7142 Op0.getResNo() == 0 && Op1.getResNo() == 1) 7143 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 7144 N->getValueType(0), Op0.getOperand(0)); 7145 return SDValue(); 7146} 7147 7148/// PerformSTORECombine - Target-specific dag combine xforms for 7149/// ISD::STORE. 7150static SDValue PerformSTORECombine(SDNode *N, 7151 TargetLowering::DAGCombinerInfo &DCI) { 7152 // Bitcast an i64 store extracted from a vector to f64. 7153 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
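// (Rationale, as far as it can be inferred here: an f64 value can be stored
// straight from a VFP/NEON D register instead of as two i32 halves.)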
7154 StoreSDNode *St = cast<StoreSDNode>(N); 7155 SDValue StVal = St->getValue(); 7156 if (!ISD::isNormalStore(St) || St->isVolatile()) 7157 return SDValue(); 7158 7159 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 7160 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 7161 SelectionDAG &DAG = DCI.DAG; 7162 DebugLoc DL = St->getDebugLoc(); 7163 SDValue BasePtr = St->getBasePtr(); 7164 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 7165 StVal.getNode()->getOperand(0), BasePtr, 7166 St->getPointerInfo(), St->isVolatile(), 7167 St->isNonTemporal(), St->getAlignment()); 7168 7169 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7170 DAG.getConstant(4, MVT::i32)); 7171 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 7172 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 7173 St->isNonTemporal(), 7174 std::min(4U, St->getAlignment() / 2)); 7175 } 7176 7177 if (StVal.getValueType() != MVT::i64 || 7178 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7179 return SDValue(); 7180 7181 SelectionDAG &DAG = DCI.DAG; 7182 DebugLoc dl = StVal.getDebugLoc(); 7183 SDValue IntVec = StVal.getOperand(0); 7184 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7185 IntVec.getValueType().getVectorNumElements()); 7186 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 7187 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7188 Vec, StVal.getOperand(1)); 7189 dl = N->getDebugLoc(); 7190 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 7191 // Make the DAGCombiner fold the bitcasts. 7192 DCI.AddToWorklist(Vec.getNode()); 7193 DCI.AddToWorklist(ExtElt.getNode()); 7194 DCI.AddToWorklist(V.getNode()); 7195 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 7196 St->getPointerInfo(), St->isVolatile(), 7197 St->isNonTemporal(), St->getAlignment(), 7198 St->getTBAAInfo()); 7199} 7200 7201/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 7202/// are normal, non-volatile loads. If so, it is profitable to bitcast an 7203/// i64 vector to have f64 elements, since the value can then be loaded 7204/// directly into a VFP register. 7205static bool hasNormalLoadOperand(SDNode *N) { 7206 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 7207 for (unsigned i = 0; i < NumElts; ++i) { 7208 SDNode *Elt = N->getOperand(i).getNode(); 7209 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 7210 return true; 7211 } 7212 return false; 7213} 7214 7215/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 7216/// ISD::BUILD_VECTOR. 7217static SDValue PerformBUILD_VECTORCombine(SDNode *N, 7218 TargetLowering::DAGCombinerInfo &DCI){ 7219 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 7220 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 7221 // into a pair of GPRs, which is fine when the value is used as a scalar, 7222 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 7223 SelectionDAG &DAG = DCI.DAG; 7224 if (N->getNumOperands() == 2) { 7225 SDValue RV = PerformVMOVDRRCombine(N, DAG); 7226 if (RV.getNode()) 7227 return RV; 7228 } 7229 7230 // Load i64 elements as f64 values so that type legalization does not split 7231 // them up into i32 values. 
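// (hasNormalLoadOperand above states the other half of the rationale: an f64
// element can then be loaded directly into a VFP register.)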
7232 EVT VT = N->getValueType(0); 7233 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 7234 return SDValue(); 7235 DebugLoc dl = N->getDebugLoc(); 7236 SmallVector<SDValue, 8> Ops; 7237 unsigned NumElts = VT.getVectorNumElements(); 7238 for (unsigned i = 0; i < NumElts; ++i) { 7239 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 7240 Ops.push_back(V); 7241 // Make the DAGCombiner fold the bitcast. 7242 DCI.AddToWorklist(V.getNode()); 7243 } 7244 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 7245 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 7246 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 7247} 7248 7249/// PerformInsertEltCombine - Target-specific dag combine xforms for 7250/// ISD::INSERT_VECTOR_ELT. 7251static SDValue PerformInsertEltCombine(SDNode *N, 7252 TargetLowering::DAGCombinerInfo &DCI) { 7253 // Bitcast an i64 load inserted into a vector to f64. 7254 // Otherwise, the i64 value will be legalized to a pair of i32 values. 7255 EVT VT = N->getValueType(0); 7256 SDNode *Elt = N->getOperand(1).getNode(); 7257 if (VT.getVectorElementType() != MVT::i64 || 7258 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 7259 return SDValue(); 7260 7261 SelectionDAG &DAG = DCI.DAG; 7262 DebugLoc dl = N->getDebugLoc(); 7263 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7264 VT.getVectorNumElements()); 7265 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 7266 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 7267 // Make the DAGCombiner fold the bitcasts. 7268 DCI.AddToWorklist(Vec.getNode()); 7269 DCI.AddToWorklist(V.getNode()); 7270 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 7271 Vec, V, N->getOperand(2)); 7272 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 7273} 7274 7275/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 7276/// ISD::VECTOR_SHUFFLE. 7277static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 7278 // The LLVM shufflevector instruction does not require the shuffle mask 7279 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 7280 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 7281 // operands do not match the mask length, they are extended by concatenating 7282 // them with undef vectors. That is probably the right thing for other 7283 // targets, but for NEON it is better to concatenate two double-register 7284 // size vector operands into a single quad-register size vector. Do that 7285 // transformation here: 7286 // shuffle(concat(v1, undef), concat(v2, undef)) -> 7287 // shuffle(concat(v1, v2), undef) 7288 SDValue Op0 = N->getOperand(0); 7289 SDValue Op1 = N->getOperand(1); 7290 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 7291 Op1.getOpcode() != ISD::CONCAT_VECTORS || 7292 Op0.getNumOperands() != 2 || 7293 Op1.getNumOperands() != 2) 7294 return SDValue(); 7295 SDValue Concat0Op1 = Op0.getOperand(1); 7296 SDValue Concat1Op1 = Op1.getOperand(1); 7297 if (Concat0Op1.getOpcode() != ISD::UNDEF || 7298 Concat1Op1.getOpcode() != ISD::UNDEF) 7299 return SDValue(); 7300 // Skip the transformation if any of the types are illegal. 
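// Illustrative example of the mask translation done further below (not from
// the original source): with v4i16 halves concatenated to v8i16, NumElts is 8
// and HalfElts is 4, so an original mask element 9 (lane 1 of the second
// concat) is remapped to 4 + 9 - 8 = 5 in the combined vector, while mask
// elements that referred to the undef halves become -1.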
7301 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7302 EVT VT = N->getValueType(0); 7303 if (!TLI.isTypeLegal(VT) || 7304 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 7305 !TLI.isTypeLegal(Concat1Op1.getValueType())) 7306 return SDValue(); 7307 7308 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 7309 Op0.getOperand(0), Op1.getOperand(0)); 7310 // Translate the shuffle mask. 7311 SmallVector<int, 16> NewMask; 7312 unsigned NumElts = VT.getVectorNumElements(); 7313 unsigned HalfElts = NumElts/2; 7314 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 7315 for (unsigned n = 0; n < NumElts; ++n) { 7316 int MaskElt = SVN->getMaskElt(n); 7317 int NewElt = -1; 7318 if (MaskElt < (int)HalfElts) 7319 NewElt = MaskElt; 7320 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 7321 NewElt = HalfElts + MaskElt - NumElts; 7322 NewMask.push_back(NewElt); 7323 } 7324 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 7325 DAG.getUNDEF(VT), NewMask.data()); 7326} 7327 7328/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 7329/// NEON load/store intrinsics to merge base address updates. 7330static SDValue CombineBaseUpdate(SDNode *N, 7331 TargetLowering::DAGCombinerInfo &DCI) { 7332 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 7333 return SDValue(); 7334 7335 SelectionDAG &DAG = DCI.DAG; 7336 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 7337 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 7338 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 7339 SDValue Addr = N->getOperand(AddrOpIdx); 7340 7341 // Search for a use of the address operand that is an increment. 7342 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 7343 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 7344 SDNode *User = *UI; 7345 if (User->getOpcode() != ISD::ADD || 7346 UI.getUse().getResNo() != Addr.getResNo()) 7347 continue; 7348 7349 // Check that the add is independent of the load/store. Otherwise, folding 7350 // it would create a cycle. 7351 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 7352 continue; 7353 7354 // Find the new opcode for the updating load/store. 
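// For example (illustrative): an arm.neon.vld1 whose address is also fed to an
// ADD of the access size becomes ARMISD::VLD1_UPD, the post-incrementing form
// that returns the updated pointer as an extra result; the ADD's users are
// then rewired to that result further down.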
7355 bool isLoad = true; 7356 bool isLaneOp = false; 7357 unsigned NewOpc = 0; 7358 unsigned NumVecs = 0; 7359 if (isIntrinsic) { 7360 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7361 switch (IntNo) { 7362 default: assert(0 && "unexpected intrinsic for Neon base update"); 7363 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 7364 NumVecs = 1; break; 7365 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 7366 NumVecs = 2; break; 7367 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 7368 NumVecs = 3; break; 7369 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 7370 NumVecs = 4; break; 7371 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 7372 NumVecs = 2; isLaneOp = true; break; 7373 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 7374 NumVecs = 3; isLaneOp = true; break; 7375 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 7376 NumVecs = 4; isLaneOp = true; break; 7377 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 7378 NumVecs = 1; isLoad = false; break; 7379 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 7380 NumVecs = 2; isLoad = false; break; 7381 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 7382 NumVecs = 3; isLoad = false; break; 7383 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 7384 NumVecs = 4; isLoad = false; break; 7385 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 7386 NumVecs = 2; isLoad = false; isLaneOp = true; break; 7387 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 7388 NumVecs = 3; isLoad = false; isLaneOp = true; break; 7389 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 7390 NumVecs = 4; isLoad = false; isLaneOp = true; break; 7391 } 7392 } else { 7393 isLaneOp = true; 7394 switch (N->getOpcode()) { 7395 default: assert(0 && "unexpected opcode for Neon base update"); 7396 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 7397 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 7398 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 7399 } 7400 } 7401 7402 // Find the size of memory referenced by the load/store. 7403 EVT VecTy; 7404 if (isLoad) 7405 VecTy = N->getValueType(0); 7406 else 7407 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 7408 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 7409 if (isLaneOp) 7410 NumBytes /= VecTy.getVectorNumElements(); 7411 7412 // If the increment is a constant, it must match the memory ref size. 7413 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 7414 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 7415 uint64_t IncVal = CInc->getZExtValue(); 7416 if (IncVal != NumBytes) 7417 continue; 7418 } else if (NumBytes >= 3 * 16) { 7419 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 7420 // separate instructions that make it harder to use a non-constant update. 7421 continue; 7422 } 7423 7424 // Create the new updating load/store node. 7425 EVT Tys[6]; 7426 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 7427 unsigned n; 7428 for (n = 0; n < NumResultVecs; ++n) 7429 Tys[n] = VecTy; 7430 Tys[n++] = MVT::i32; 7431 Tys[n] = MVT::Other; 7432 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 7433 SmallVector<SDValue, 8> Ops; 7434 Ops.push_back(N->getOperand(0)); // incoming chain 7435 Ops.push_back(N->getOperand(AddrOpIdx)); 7436 Ops.push_back(Inc); 7437 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 7438 Ops.push_back(N->getOperand(i)); 7439 } 7440 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 7441 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 7442 Ops.data(), Ops.size(), 7443 MemInt->getMemoryVT(), 7444 MemInt->getMemOperand()); 7445 7446 // Update the uses. 7447 std::vector<SDValue> NewResults; 7448 for (unsigned i = 0; i < NumResultVecs; ++i) { 7449 NewResults.push_back(SDValue(UpdN.getNode(), i)); 7450 } 7451 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 7452 DCI.CombineTo(N, NewResults); 7453 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 7454 7455 break; 7456 } 7457 return SDValue(); 7458} 7459 7460/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 7461/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 7462/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 7463/// return true. 7464static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 7465 SelectionDAG &DAG = DCI.DAG; 7466 EVT VT = N->getValueType(0); 7467 // vldN-dup instructions only support 64-bit vectors for N > 1. 7468 if (!VT.is64BitVector()) 7469 return false; 7470 7471 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 7472 SDNode *VLD = N->getOperand(0).getNode(); 7473 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 7474 return false; 7475 unsigned NumVecs = 0; 7476 unsigned NewOpc = 0; 7477 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 7478 if (IntNo == Intrinsic::arm_neon_vld2lane) { 7479 NumVecs = 2; 7480 NewOpc = ARMISD::VLD2DUP; 7481 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 7482 NumVecs = 3; 7483 NewOpc = ARMISD::VLD3DUP; 7484 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 7485 NumVecs = 4; 7486 NewOpc = ARMISD::VLD4DUP; 7487 } else { 7488 return false; 7489 } 7490 7491 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 7492 // numbers match the load. 7493 unsigned VLDLaneNo = 7494 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 7495 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7496 UI != UE; ++UI) { 7497 // Ignore uses of the chain result. 7498 if (UI.getUse().getResNo() == NumVecs) 7499 continue; 7500 SDNode *User = *UI; 7501 if (User->getOpcode() != ARMISD::VDUPLANE || 7502 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 7503 return false; 7504 } 7505 7506 // Create the vldN-dup node. 7507 EVT Tys[5]; 7508 unsigned n; 7509 for (n = 0; n < NumVecs; ++n) 7510 Tys[n] = VT; 7511 Tys[n] = MVT::Other; 7512 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 7513 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 7514 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 7515 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 7516 Ops, 2, VLDMemInt->getMemoryVT(), 7517 VLDMemInt->getMemOperand()); 7518 7519 // Update the uses. 
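  // Each VDUPLANE user of a vldN-lane result can simply use the corresponding
  // result of the new vldN-dup node, since that result already has the loaded
  // element broadcast to every lane.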
7520 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7521 UI != UE; ++UI) { 7522 unsigned ResNo = UI.getUse().getResNo(); 7523 // Ignore uses of the chain result. 7524 if (ResNo == NumVecs) 7525 continue; 7526 SDNode *User = *UI; 7527 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 7528 } 7529 7530 // Now the vldN-lane intrinsic is dead except for its chain result. 7531 // Update uses of the chain. 7532 std::vector<SDValue> VLDDupResults; 7533 for (unsigned n = 0; n < NumVecs; ++n) 7534 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 7535 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 7536 DCI.CombineTo(VLD, VLDDupResults); 7537 7538 return true; 7539} 7540 7541/// PerformVDUPLANECombine - Target-specific dag combine xforms for 7542/// ARMISD::VDUPLANE. 7543static SDValue PerformVDUPLANECombine(SDNode *N, 7544 TargetLowering::DAGCombinerInfo &DCI) { 7545 SDValue Op = N->getOperand(0); 7546 7547 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 7548 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 7549 if (CombineVLDDUP(N, DCI)) 7550 return SDValue(N, 0); 7551 7552 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 7553 // redundant. Ignore bit_converts for now; element sizes are checked below. 7554 while (Op.getOpcode() == ISD::BITCAST) 7555 Op = Op.getOperand(0); 7556 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 7557 return SDValue(); 7558 7559 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 7560 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 7561 // The canonical VMOV for a zero vector uses a 32-bit element size. 7562 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7563 unsigned EltBits; 7564 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 7565 EltSize = 8; 7566 EVT VT = N->getValueType(0); 7567 if (EltSize > VT.getVectorElementType().getSizeInBits()) 7568 return SDValue(); 7569 7570 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 7571} 7572 7573// isConstVecPow2 - Return true if each vector element is a power of 2, all 7574// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 7575static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 7576{ 7577 integerPart cN; 7578 integerPart c0 = 0; 7579 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 7580 I != E; I++) { 7581 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 7582 if (!C) 7583 return false; 7584 7585 bool isExact; 7586 APFloat APF = C->getValueAPF(); 7587 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 7588 != APFloat::opOK || !isExact) 7589 return false; 7590 7591 c0 = (I == 0) ? cN : c0; 7592 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 7593 return false; 7594 } 7595 C = c0; 7596 return true; 7597} 7598 7599/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 7600/// can replace combinations of VMUL and VCVT (floating-point to integer) 7601/// when the VMUL has a constant operand that is a power of 2. 
7602/// 7603/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7604/// vmul.f32 d16, d17, d16 7605/// vcvt.s32.f32 d16, d16 7606/// becomes: 7607/// vcvt.s32.f32 d16, d16, #3 7608static SDValue PerformVCVTCombine(SDNode *N, 7609 TargetLowering::DAGCombinerInfo &DCI, 7610 const ARMSubtarget *Subtarget) { 7611 SelectionDAG &DAG = DCI.DAG; 7612 SDValue Op = N->getOperand(0); 7613 7614 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 7615 Op.getOpcode() != ISD::FMUL) 7616 return SDValue(); 7617 7618 uint64_t C; 7619 SDValue N0 = Op->getOperand(0); 7620 SDValue ConstVec = Op->getOperand(1); 7621 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 7622 7623 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7624 !isConstVecPow2(ConstVec, isSigned, C)) 7625 return SDValue(); 7626 7627 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 7628 Intrinsic::arm_neon_vcvtfp2fxu; 7629 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7630 N->getValueType(0), 7631 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 7632 DAG.getConstant(Log2_64(C), MVT::i32)); 7633} 7634 7635/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 7636/// can replace combinations of VCVT (integer to floating-point) and VDIV 7637/// when the VDIV has a constant operand that is a power of 2. 7638/// 7639/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7640/// vcvt.f32.s32 d16, d16 7641/// vdiv.f32 d16, d17, d16 7642/// becomes: 7643/// vcvt.f32.s32 d16, d16, #3 7644static SDValue PerformVDIVCombine(SDNode *N, 7645 TargetLowering::DAGCombinerInfo &DCI, 7646 const ARMSubtarget *Subtarget) { 7647 SelectionDAG &DAG = DCI.DAG; 7648 SDValue Op = N->getOperand(0); 7649 unsigned OpOpcode = Op.getNode()->getOpcode(); 7650 7651 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 7652 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 7653 return SDValue(); 7654 7655 uint64_t C; 7656 SDValue ConstVec = N->getOperand(1); 7657 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 7658 7659 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7660 !isConstVecPow2(ConstVec, isSigned, C)) 7661 return SDValue(); 7662 7663 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 7664 Intrinsic::arm_neon_vcvtfxu2fp; 7665 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7666 Op.getValueType(), 7667 DAG.getConstant(IntrinsicOpcode, MVT::i32), 7668 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 7669} 7670 7671/// Getvshiftimm - Check if this is a valid build_vector for the immediate 7672/// operand of a vector shift operation, where all the elements of the 7673/// build_vector must have the same constant integer value. 7674static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 7675 // Ignore bit_converts. 7676 while (Op.getOpcode() == ISD::BITCAST) 7677 Op = Op.getOperand(0); 7678 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7679 APInt SplatBits, SplatUndef; 7680 unsigned SplatBitSize; 7681 bool HasAnyUndefs; 7682 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 7683 HasAnyUndefs, ElementBits) || 7684 SplatBitSize > ElementBits) 7685 return false; 7686 Cnt = SplatBits.getSExtValue(); 7687 return true; 7688} 7689 7690/// isVShiftLImm - Check if this is a valid build_vector for the immediate 7691/// operand of a vector shift left operation. 
That value must be in the range: 7692/// 0 <= Value < ElementBits for a left shift; or 7693/// 0 <= Value <= ElementBits for a long left shift. 7694static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 7695 assert(VT.isVector() && "vector shift count is not a vector type"); 7696 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7697 if (! getVShiftImm(Op, ElementBits, Cnt)) 7698 return false; 7699 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 7700} 7701 7702/// isVShiftRImm - Check if this is a valid build_vector for the immediate 7703/// operand of a vector shift right operation. For a shift opcode, the value 7704/// is positive, but for an intrinsic the value count must be negative. The 7705/// absolute value must be in the range: 7706/// 1 <= |Value| <= ElementBits for a right shift; or 7707/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 7708static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 7709 int64_t &Cnt) { 7710 assert(VT.isVector() && "vector shift count is not a vector type"); 7711 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7712 if (! getVShiftImm(Op, ElementBits, Cnt)) 7713 return false; 7714 if (isIntrinsic) 7715 Cnt = -Cnt; 7716 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 7717} 7718 7719/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 7720static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 7721 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 7722 switch (IntNo) { 7723 default: 7724 // Don't do anything for most intrinsics. 7725 break; 7726 7727 // Vector shifts: check for immediate versions and lower them. 7728 // Note: This is done during DAG combining instead of DAG legalizing because 7729 // the build_vectors for 64-bit vector element shift counts are generally 7730 // not legal, and it is hard to see their values after they get legalized to 7731 // loads from a constant pool. 7732 case Intrinsic::arm_neon_vshifts: 7733 case Intrinsic::arm_neon_vshiftu: 7734 case Intrinsic::arm_neon_vshiftls: 7735 case Intrinsic::arm_neon_vshiftlu: 7736 case Intrinsic::arm_neon_vshiftn: 7737 case Intrinsic::arm_neon_vrshifts: 7738 case Intrinsic::arm_neon_vrshiftu: 7739 case Intrinsic::arm_neon_vrshiftn: 7740 case Intrinsic::arm_neon_vqshifts: 7741 case Intrinsic::arm_neon_vqshiftu: 7742 case Intrinsic::arm_neon_vqshiftsu: 7743 case Intrinsic::arm_neon_vqshiftns: 7744 case Intrinsic::arm_neon_vqshiftnu: 7745 case Intrinsic::arm_neon_vqshiftnsu: 7746 case Intrinsic::arm_neon_vqrshiftns: 7747 case Intrinsic::arm_neon_vqrshiftnu: 7748 case Intrinsic::arm_neon_vqrshiftnsu: { 7749 EVT VT = N->getOperand(1).getValueType(); 7750 int64_t Cnt; 7751 unsigned VShiftOpc = 0; 7752 7753 switch (IntNo) { 7754 case Intrinsic::arm_neon_vshifts: 7755 case Intrinsic::arm_neon_vshiftu: 7756 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 7757 VShiftOpc = ARMISD::VSHL; 7758 break; 7759 } 7760 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 7761 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
7762 ARMISD::VSHRs : ARMISD::VSHRu); 7763 break; 7764 } 7765 return SDValue(); 7766 7767 case Intrinsic::arm_neon_vshiftls: 7768 case Intrinsic::arm_neon_vshiftlu: 7769 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 7770 break; 7771 llvm_unreachable("invalid shift count for vshll intrinsic"); 7772 7773 case Intrinsic::arm_neon_vrshifts: 7774 case Intrinsic::arm_neon_vrshiftu: 7775 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 7776 break; 7777 return SDValue(); 7778 7779 case Intrinsic::arm_neon_vqshifts: 7780 case Intrinsic::arm_neon_vqshiftu: 7781 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7782 break; 7783 return SDValue(); 7784 7785 case Intrinsic::arm_neon_vqshiftsu: 7786 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7787 break; 7788 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 7789 7790 case Intrinsic::arm_neon_vshiftn: 7791 case Intrinsic::arm_neon_vrshiftn: 7792 case Intrinsic::arm_neon_vqshiftns: 7793 case Intrinsic::arm_neon_vqshiftnu: 7794 case Intrinsic::arm_neon_vqshiftnsu: 7795 case Intrinsic::arm_neon_vqrshiftns: 7796 case Intrinsic::arm_neon_vqrshiftnu: 7797 case Intrinsic::arm_neon_vqrshiftnsu: 7798 // Narrowing shifts require an immediate right shift. 7799 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 7800 break; 7801 llvm_unreachable("invalid shift count for narrowing vector shift " 7802 "intrinsic"); 7803 7804 default: 7805 llvm_unreachable("unhandled vector shift"); 7806 } 7807 7808 switch (IntNo) { 7809 case Intrinsic::arm_neon_vshifts: 7810 case Intrinsic::arm_neon_vshiftu: 7811 // Opcode already set above. 7812 break; 7813 case Intrinsic::arm_neon_vshiftls: 7814 case Intrinsic::arm_neon_vshiftlu: 7815 if (Cnt == VT.getVectorElementType().getSizeInBits()) 7816 VShiftOpc = ARMISD::VSHLLi; 7817 else 7818 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
7819 ARMISD::VSHLLs : ARMISD::VSHLLu); 7820 break; 7821 case Intrinsic::arm_neon_vshiftn: 7822 VShiftOpc = ARMISD::VSHRN; break; 7823 case Intrinsic::arm_neon_vrshifts: 7824 VShiftOpc = ARMISD::VRSHRs; break; 7825 case Intrinsic::arm_neon_vrshiftu: 7826 VShiftOpc = ARMISD::VRSHRu; break; 7827 case Intrinsic::arm_neon_vrshiftn: 7828 VShiftOpc = ARMISD::VRSHRN; break; 7829 case Intrinsic::arm_neon_vqshifts: 7830 VShiftOpc = ARMISD::VQSHLs; break; 7831 case Intrinsic::arm_neon_vqshiftu: 7832 VShiftOpc = ARMISD::VQSHLu; break; 7833 case Intrinsic::arm_neon_vqshiftsu: 7834 VShiftOpc = ARMISD::VQSHLsu; break; 7835 case Intrinsic::arm_neon_vqshiftns: 7836 VShiftOpc = ARMISD::VQSHRNs; break; 7837 case Intrinsic::arm_neon_vqshiftnu: 7838 VShiftOpc = ARMISD::VQSHRNu; break; 7839 case Intrinsic::arm_neon_vqshiftnsu: 7840 VShiftOpc = ARMISD::VQSHRNsu; break; 7841 case Intrinsic::arm_neon_vqrshiftns: 7842 VShiftOpc = ARMISD::VQRSHRNs; break; 7843 case Intrinsic::arm_neon_vqrshiftnu: 7844 VShiftOpc = ARMISD::VQRSHRNu; break; 7845 case Intrinsic::arm_neon_vqrshiftnsu: 7846 VShiftOpc = ARMISD::VQRSHRNsu; break; 7847 } 7848 7849 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7850 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 7851 } 7852 7853 case Intrinsic::arm_neon_vshiftins: { 7854 EVT VT = N->getOperand(1).getValueType(); 7855 int64_t Cnt; 7856 unsigned VShiftOpc = 0; 7857 7858 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 7859 VShiftOpc = ARMISD::VSLI; 7860 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 7861 VShiftOpc = ARMISD::VSRI; 7862 else { 7863 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 7864 } 7865 7866 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7867 N->getOperand(1), N->getOperand(2), 7868 DAG.getConstant(Cnt, MVT::i32)); 7869 } 7870 7871 case Intrinsic::arm_neon_vqrshifts: 7872 case Intrinsic::arm_neon_vqrshiftu: 7873 // No immediate versions of these to check for. 7874 break; 7875 } 7876 7877 return SDValue(); 7878} 7879 7880/// PerformShiftCombine - Checks for immediate versions of vector shifts and 7881/// lowers them. As with the vector shift intrinsics, this is done during DAG 7882/// combining instead of DAG legalizing because the build_vectors for 64-bit 7883/// vector element shift counts are generally not legal, and it is hard to see 7884/// their values after they get legalized to loads from a constant pool. 7885static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 7886 const ARMSubtarget *ST) { 7887 EVT VT = N->getValueType(0); 7888 7889 // Nothing to be done for scalar shifts. 7890 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7891 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 7892 return SDValue(); 7893 7894 assert(ST->hasNEON() && "unexpected vector shift"); 7895 int64_t Cnt; 7896 7897 switch (N->getOpcode()) { 7898 default: llvm_unreachable("unexpected shift opcode"); 7899 7900 case ISD::SHL: 7901 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 7902 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 7903 DAG.getConstant(Cnt, MVT::i32)); 7904 break; 7905 7906 case ISD::SRA: 7907 case ISD::SRL: 7908 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 7909 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
7910 ARMISD::VSHRs : ARMISD::VSHRu); 7911 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 7912 DAG.getConstant(Cnt, MVT::i32)); 7913 } 7914 } 7915 return SDValue(); 7916} 7917 7918/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 7919/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 7920static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 7921 const ARMSubtarget *ST) { 7922 SDValue N0 = N->getOperand(0); 7923 7924 // Check for sign- and zero-extensions of vector extract operations of 8- 7925 // and 16-bit vector elements. NEON supports these directly. They are 7926 // handled during DAG combining because type legalization will promote them 7927 // to 32-bit types and it is messy to recognize the operations after that. 7928 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7929 SDValue Vec = N0.getOperand(0); 7930 SDValue Lane = N0.getOperand(1); 7931 EVT VT = N->getValueType(0); 7932 EVT EltVT = N0.getValueType(); 7933 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7934 7935 if (VT == MVT::i32 && 7936 (EltVT == MVT::i8 || EltVT == MVT::i16) && 7937 TLI.isTypeLegal(Vec.getValueType()) && 7938 isa<ConstantSDNode>(Lane)) { 7939 7940 unsigned Opc = 0; 7941 switch (N->getOpcode()) { 7942 default: llvm_unreachable("unexpected opcode"); 7943 case ISD::SIGN_EXTEND: 7944 Opc = ARMISD::VGETLANEs; 7945 break; 7946 case ISD::ZERO_EXTEND: 7947 case ISD::ANY_EXTEND: 7948 Opc = ARMISD::VGETLANEu; 7949 break; 7950 } 7951 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 7952 } 7953 } 7954 7955 return SDValue(); 7956} 7957 7958/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 7959/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 7960static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 7961 const ARMSubtarget *ST) { 7962 // If the target supports NEON, try to use vmax/vmin instructions for f32 7963 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 7964 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 7965 // a NaN; only do the transformation when it matches that behavior. 7966 7967 // For now only do this when using NEON for FP operations; if using VFP, it 7968 // is not obvious that the benefit outweighs the cost of switching to the 7969 // NEON pipeline. 7970 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 7971 N->getValueType(0) != MVT::f32) 7972 return SDValue(); 7973 7974 SDValue CondLHS = N->getOperand(0); 7975 SDValue CondRHS = N->getOperand(1); 7976 SDValue LHS = N->getOperand(2); 7977 SDValue RHS = N->getOperand(3); 7978 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 7979 7980 unsigned Opcode = 0; 7981 bool IsReversed; 7982 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 7983 IsReversed = false; // x CC y ? x : y 7984 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 7985 IsReversed = true ; // x CC y ? y : x 7986 } else { 7987 return SDValue(); 7988 } 7989 7990 bool IsUnordered; 7991 switch (CC) { 7992 default: break; 7993 case ISD::SETOLT: 7994 case ISD::SETOLE: 7995 case ISD::SETLT: 7996 case ISD::SETLE: 7997 case ISD::SETULT: 7998 case ISD::SETULE: 7999 // If LHS is NaN, an ordered comparison will be false and the result will 8000 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 8001 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
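    // (Concretely: for an unordered compare such as SETULT, a NaN RHS makes
    // the compare true and the select returns the LHS, whereas vmin would
    // return NaN, so in that case it is the RHS that must be known non-NaN.)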
8002 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 8003 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 8004 break; 8005 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 8006 // will return -0, so vmin can only be used for unsafe math or if one of 8007 // the operands is known to be nonzero. 8008 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 8009 !UnsafeFPMath && 8010 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 8011 break; 8012 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 8013 break; 8014 8015 case ISD::SETOGT: 8016 case ISD::SETOGE: 8017 case ISD::SETGT: 8018 case ISD::SETGE: 8019 case ISD::SETUGT: 8020 case ISD::SETUGE: 8021 // If LHS is NaN, an ordered comparison will be false and the result will 8022 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 8023 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 8024 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 8025 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 8026 break; 8027 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 8028 // will return +0, so vmax can only be used for unsafe math or if one of 8029 // the operands is known to be nonzero. 8030 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 8031 !UnsafeFPMath && 8032 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 8033 break; 8034 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 8035 break; 8036 } 8037 8038 if (!Opcode) 8039 return SDValue(); 8040 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 8041} 8042 8043/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 8044SDValue 8045ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 8046 SDValue Cmp = N->getOperand(4); 8047 if (Cmp.getOpcode() != ARMISD::CMPZ) 8048 // Only looking at EQ and NE cases. 8049 return SDValue(); 8050 8051 EVT VT = N->getValueType(0); 8052 DebugLoc dl = N->getDebugLoc(); 8053 SDValue LHS = Cmp.getOperand(0); 8054 SDValue RHS = Cmp.getOperand(1); 8055 SDValue FalseVal = N->getOperand(0); 8056 SDValue TrueVal = N->getOperand(1); 8057 SDValue ARMcc = N->getOperand(2); 8058 ARMCC::CondCodes CC = 8059 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 8060 8061 // Simplify 8062 // mov r1, r0 8063 // cmp r1, x 8064 // mov r0, y 8065 // moveq r0, x 8066 // to 8067 // cmp r0, x 8068 // movne r0, y 8069 // 8070 // mov r1, r0 8071 // cmp r1, x 8072 // mov r0, x 8073 // movne r0, y 8074 // to 8075 // cmp r0, x 8076 // movne r0, y 8077 /// FIXME: Turn this into a target neutral optimization? 8078 SDValue Res; 8079 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 8080 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 8081 N->getOperand(3), Cmp); 8082 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 8083 SDValue ARMcc; 8084 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 8085 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 8086 N->getOperand(3), NewCmp); 8087 } 8088 8089 if (Res.getNode()) { 8090 APInt KnownZero, KnownOne; 8091 APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()); 8092 DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne); 8093 // Capture demanded bits information that would be otherwise lost. 
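    // For example, if both value operands of the CMOV are boolean-like (only
    // bit 0 can possibly be set), KnownZero is 0xfffffffe and an AssertZext
    // of i1 is attached so later combines can still exploit that fact.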
8094 if (KnownZero == 0xfffffffe) 8095 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8096 DAG.getValueType(MVT::i1)); 8097 else if (KnownZero == 0xffffff00) 8098 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8099 DAG.getValueType(MVT::i8)); 8100 else if (KnownZero == 0xffff0000) 8101 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8102 DAG.getValueType(MVT::i16)); 8103 } 8104 8105 return Res; 8106} 8107 8108SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 8109 DAGCombinerInfo &DCI) const { 8110 switch (N->getOpcode()) { 8111 default: break; 8112 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 8113 case ISD::SUB: return PerformSUBCombine(N, DCI); 8114 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 8115 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 8116 case ISD::AND: return PerformANDCombine(N, DCI); 8117 case ARMISD::BFI: return PerformBFICombine(N, DCI); 8118 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 8119 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 8120 case ISD::STORE: return PerformSTORECombine(N, DCI); 8121 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 8122 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 8123 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 8124 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 8125 case ISD::FP_TO_SINT: 8126 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 8127 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 8128 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 8129 case ISD::SHL: 8130 case ISD::SRA: 8131 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 8132 case ISD::SIGN_EXTEND: 8133 case ISD::ZERO_EXTEND: 8134 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 8135 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 8136 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 8137 case ARMISD::VLD2DUP: 8138 case ARMISD::VLD3DUP: 8139 case ARMISD::VLD4DUP: 8140 return CombineBaseUpdate(N, DCI); 8141 case ISD::INTRINSIC_VOID: 8142 case ISD::INTRINSIC_W_CHAIN: 8143 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 8144 case Intrinsic::arm_neon_vld1: 8145 case Intrinsic::arm_neon_vld2: 8146 case Intrinsic::arm_neon_vld3: 8147 case Intrinsic::arm_neon_vld4: 8148 case Intrinsic::arm_neon_vld2lane: 8149 case Intrinsic::arm_neon_vld3lane: 8150 case Intrinsic::arm_neon_vld4lane: 8151 case Intrinsic::arm_neon_vst1: 8152 case Intrinsic::arm_neon_vst2: 8153 case Intrinsic::arm_neon_vst3: 8154 case Intrinsic::arm_neon_vst4: 8155 case Intrinsic::arm_neon_vst2lane: 8156 case Intrinsic::arm_neon_vst3lane: 8157 case Intrinsic::arm_neon_vst4lane: 8158 return CombineBaseUpdate(N, DCI); 8159 default: break; 8160 } 8161 break; 8162 } 8163 return SDValue(); 8164} 8165 8166bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 8167 EVT VT) const { 8168 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 8169} 8170 8171bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 8172 if (!Subtarget->allowsUnalignedMem()) 8173 return false; 8174 8175 switch (VT.getSimpleVT().SimpleTy) { 8176 default: 8177 return false; 8178 case MVT::i8: 8179 case MVT::i16: 8180 case MVT::i32: 8181 return true; 8182 // FIXME: VLD1 etc with standard alignment is legal. 
8183 } 8184} 8185 8186static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, 8187 unsigned AlignCheck) { 8188 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && 8189 (DstAlign == 0 || DstAlign % AlignCheck == 0)); 8190} 8191 8192EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, 8193 unsigned DstAlign, unsigned SrcAlign, 8194 bool IsZeroVal, 8195 bool MemcpyStrSrc, 8196 MachineFunction &MF) const { 8197 const Function *F = MF.getFunction(); 8198 8199 // See if we can use NEON instructions for this... 8200 if (IsZeroVal && 8201 !F->hasFnAttr(Attribute::NoImplicitFloat) && 8202 Subtarget->hasNEON()) { 8203 if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) { 8204 return MVT::v4i32; 8205 } else if (memOpAlign(SrcAlign, DstAlign, 8) && Size >= 8) { 8206 return MVT::v2i32; 8207 } 8208 } 8209 8210 // Lowering to i32/i16 if the size permits. 8211 if (Size >= 4) { 8212 return MVT::i32; 8213 } else if (Size >= 2) { 8214 return MVT::i16; 8215 } 8216 8217 // Let the target-independent logic figure it out. 8218 return MVT::Other; 8219} 8220 8221static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 8222 if (V < 0) 8223 return false; 8224 8225 unsigned Scale = 1; 8226 switch (VT.getSimpleVT().SimpleTy) { 8227 default: return false; 8228 case MVT::i1: 8229 case MVT::i8: 8230 // Scale == 1; 8231 break; 8232 case MVT::i16: 8233 // Scale == 2; 8234 Scale = 2; 8235 break; 8236 case MVT::i32: 8237 // Scale == 4; 8238 Scale = 4; 8239 break; 8240 } 8241 8242 if ((V & (Scale - 1)) != 0) 8243 return false; 8244 V /= Scale; 8245 return V == (V & ((1LL << 5) - 1)); 8246} 8247 8248static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 8249 const ARMSubtarget *Subtarget) { 8250 bool isNeg = false; 8251 if (V < 0) { 8252 isNeg = true; 8253 V = - V; 8254 } 8255 8256 switch (VT.getSimpleVT().SimpleTy) { 8257 default: return false; 8258 case MVT::i1: 8259 case MVT::i8: 8260 case MVT::i16: 8261 case MVT::i32: 8262 // + imm12 or - imm8 8263 if (isNeg) 8264 return V == (V & ((1LL << 8) - 1)); 8265 return V == (V & ((1LL << 12) - 1)); 8266 case MVT::f32: 8267 case MVT::f64: 8268 // Same as ARM mode. FIXME: NEON? 8269 if (!Subtarget->hasVFP2()) 8270 return false; 8271 if ((V & 3) != 0) 8272 return false; 8273 V >>= 2; 8274 return V == (V & ((1LL << 8) - 1)); 8275 } 8276} 8277 8278/// isLegalAddressImmediate - Return true if the integer value can be used 8279/// as the offset of the target addressing mode for load / store of the 8280/// given type. 8281static bool isLegalAddressImmediate(int64_t V, EVT VT, 8282 const ARMSubtarget *Subtarget) { 8283 if (V == 0) 8284 return true; 8285 8286 if (!VT.isSimple()) 8287 return false; 8288 8289 if (Subtarget->isThumb1Only()) 8290 return isLegalT1AddressImmediate(V, VT); 8291 else if (Subtarget->isThumb2()) 8292 return isLegalT2AddressImmediate(V, VT, Subtarget); 8293 8294 // ARM mode. 8295 if (V < 0) 8296 V = - V; 8297 switch (VT.getSimpleVT().SimpleTy) { 8298 default: return false; 8299 case MVT::i1: 8300 case MVT::i8: 8301 case MVT::i32: 8302 // +- imm12 8303 return V == (V & ((1LL << 12) - 1)); 8304 case MVT::i16: 8305 // +- imm8 8306 return V == (V & ((1LL << 8) - 1)); 8307 case MVT::f32: 8308 case MVT::f64: 8309 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
8310 return false; 8311 if ((V & 3) != 0) 8312 return false; 8313 V >>= 2; 8314 return V == (V & ((1LL << 8) - 1)); 8315 } 8316} 8317 8318bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 8319 EVT VT) const { 8320 int Scale = AM.Scale; 8321 if (Scale < 0) 8322 return false; 8323 8324 switch (VT.getSimpleVT().SimpleTy) { 8325 default: return false; 8326 case MVT::i1: 8327 case MVT::i8: 8328 case MVT::i16: 8329 case MVT::i32: 8330 if (Scale == 1) 8331 return true; 8332 // r + r << imm 8333 Scale = Scale & ~1; 8334 return Scale == 2 || Scale == 4 || Scale == 8; 8335 case MVT::i64: 8336 // r + r 8337 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8338 return true; 8339 return false; 8340 case MVT::isVoid: 8341 // Note, we allow "void" uses (basically, uses that aren't loads or 8342 // stores), because arm allows folding a scale into many arithmetic 8343 // operations. This should be made more precise and revisited later. 8344 8345 // Allow r << imm, but the imm has to be a multiple of two. 8346 if (Scale & 1) return false; 8347 return isPowerOf2_32(Scale); 8348 } 8349} 8350 8351/// isLegalAddressingMode - Return true if the addressing mode represented 8352/// by AM is legal for this target, for a load/store of the specified type. 8353bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 8354 Type *Ty) const { 8355 EVT VT = getValueType(Ty, true); 8356 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 8357 return false; 8358 8359 // Can never fold addr of global into load/store. 8360 if (AM.BaseGV) 8361 return false; 8362 8363 switch (AM.Scale) { 8364 case 0: // no scale reg, must be "r+i" or "r", or "i". 8365 break; 8366 case 1: 8367 if (Subtarget->isThumb1Only()) 8368 return false; 8369 // FALL THROUGH. 8370 default: 8371 // ARM doesn't support any R+R*scale+imm addr modes. 8372 if (AM.BaseOffs) 8373 return false; 8374 8375 if (!VT.isSimple()) 8376 return false; 8377 8378 if (Subtarget->isThumb2()) 8379 return isLegalT2ScaledAddressingMode(AM, VT); 8380 8381 int Scale = AM.Scale; 8382 switch (VT.getSimpleVT().SimpleTy) { 8383 default: return false; 8384 case MVT::i1: 8385 case MVT::i8: 8386 case MVT::i32: 8387 if (Scale < 0) Scale = -Scale; 8388 if (Scale == 1) 8389 return true; 8390 // r + r << imm 8391 return isPowerOf2_32(Scale & ~1); 8392 case MVT::i16: 8393 case MVT::i64: 8394 // r + r 8395 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8396 return true; 8397 return false; 8398 8399 case MVT::isVoid: 8400 // Note, we allow "void" uses (basically, uses that aren't loads or 8401 // stores), because arm allows folding a scale into many arithmetic 8402 // operations. This should be made more precise and revisited later. 8403 8404 // Allow r << imm, but the imm has to be a multiple of two. 8405 if (Scale & 1) return false; 8406 return isPowerOf2_32(Scale); 8407 } 8408 break; 8409 } 8410 return true; 8411} 8412 8413/// isLegalICmpImmediate - Return true if the specified immediate is legal 8414/// icmp immediate, that is the target has icmp instructions which can compare 8415/// a register against the immediate without having to materialize the 8416/// immediate into a register. 
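/// For example, Thumb1 CMP only takes an 8-bit immediate, so on Thumb1 this
/// returns true only for values in [0, 255]; ARM and Thumb2 instead accept
/// their respective modified-immediate encodings.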
8417bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 8418 if (!Subtarget->isThumb()) 8419 return ARM_AM::getSOImmVal(Imm) != -1; 8420 if (Subtarget->isThumb2()) 8421 return ARM_AM::getT2SOImmVal(Imm) != -1; 8422 return Imm >= 0 && Imm <= 255; 8423} 8424 8425/// isLegalAddImmediate - Return true if the specified immediate is legal 8426/// add immediate, that is the target has add instructions which can add 8427/// a register with the immediate without having to materialize the 8428/// immediate into a register. 8429bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 8430 return ARM_AM::getSOImmVal(Imm) != -1; 8431} 8432 8433static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 8434 bool isSEXTLoad, SDValue &Base, 8435 SDValue &Offset, bool &isInc, 8436 SelectionDAG &DAG) { 8437 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8438 return false; 8439 8440 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 8441 // AddressingMode 3 8442 Base = Ptr->getOperand(0); 8443 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8444 int RHSC = (int)RHS->getZExtValue(); 8445 if (RHSC < 0 && RHSC > -256) { 8446 assert(Ptr->getOpcode() == ISD::ADD); 8447 isInc = false; 8448 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8449 return true; 8450 } 8451 } 8452 isInc = (Ptr->getOpcode() == ISD::ADD); 8453 Offset = Ptr->getOperand(1); 8454 return true; 8455 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 8456 // AddressingMode 2 8457 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8458 int RHSC = (int)RHS->getZExtValue(); 8459 if (RHSC < 0 && RHSC > -0x1000) { 8460 assert(Ptr->getOpcode() == ISD::ADD); 8461 isInc = false; 8462 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8463 Base = Ptr->getOperand(0); 8464 return true; 8465 } 8466 } 8467 8468 if (Ptr->getOpcode() == ISD::ADD) { 8469 isInc = true; 8470 ARM_AM::ShiftOpc ShOpcVal= 8471 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 8472 if (ShOpcVal != ARM_AM::no_shift) { 8473 Base = Ptr->getOperand(1); 8474 Offset = Ptr->getOperand(0); 8475 } else { 8476 Base = Ptr->getOperand(0); 8477 Offset = Ptr->getOperand(1); 8478 } 8479 return true; 8480 } 8481 8482 isInc = (Ptr->getOpcode() == ISD::ADD); 8483 Base = Ptr->getOperand(0); 8484 Offset = Ptr->getOperand(1); 8485 return true; 8486 } 8487 8488 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 8489 return false; 8490} 8491 8492static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 8493 bool isSEXTLoad, SDValue &Base, 8494 SDValue &Offset, bool &isInc, 8495 SelectionDAG &DAG) { 8496 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8497 return false; 8498 8499 Base = Ptr->getOperand(0); 8500 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8501 int RHSC = (int)RHS->getZExtValue(); 8502 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 8503 assert(Ptr->getOpcode() == ISD::ADD); 8504 isInc = false; 8505 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8506 return true; 8507 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 
8508 isInc = Ptr->getOpcode() == ISD::ADD; 8509 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 8510 return true; 8511 } 8512 } 8513 8514 return false; 8515} 8516 8517/// getPreIndexedAddressParts - returns true by value, base pointer and 8518/// offset pointer and addressing mode by reference if the node's address 8519/// can be legally represented as pre-indexed load / store address. 8520bool 8521ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 8522 SDValue &Offset, 8523 ISD::MemIndexedMode &AM, 8524 SelectionDAG &DAG) const { 8525 if (Subtarget->isThumb1Only()) 8526 return false; 8527 8528 EVT VT; 8529 SDValue Ptr; 8530 bool isSEXTLoad = false; 8531 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8532 Ptr = LD->getBasePtr(); 8533 VT = LD->getMemoryVT(); 8534 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8535 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8536 Ptr = ST->getBasePtr(); 8537 VT = ST->getMemoryVT(); 8538 } else 8539 return false; 8540 8541 bool isInc; 8542 bool isLegal = false; 8543 if (Subtarget->isThumb2()) 8544 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8545 Offset, isInc, DAG); 8546 else 8547 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8548 Offset, isInc, DAG); 8549 if (!isLegal) 8550 return false; 8551 8552 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 8553 return true; 8554} 8555 8556/// getPostIndexedAddressParts - returns true by value, base pointer and 8557/// offset pointer and addressing mode by reference if this node can be 8558/// combined with a load / store to form a post-indexed load / store. 8559bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 8560 SDValue &Base, 8561 SDValue &Offset, 8562 ISD::MemIndexedMode &AM, 8563 SelectionDAG &DAG) const { 8564 if (Subtarget->isThumb1Only()) 8565 return false; 8566 8567 EVT VT; 8568 SDValue Ptr; 8569 bool isSEXTLoad = false; 8570 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8571 VT = LD->getMemoryVT(); 8572 Ptr = LD->getBasePtr(); 8573 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8574 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8575 VT = ST->getMemoryVT(); 8576 Ptr = ST->getBasePtr(); 8577 } else 8578 return false; 8579 8580 bool isInc; 8581 bool isLegal = false; 8582 if (Subtarget->isThumb2()) 8583 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8584 isInc, DAG); 8585 else 8586 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8587 isInc, DAG); 8588 if (!isLegal) 8589 return false; 8590 8591 if (Ptr != Base) { 8592 // Swap base ptr and offset to catch more post-index load / store when 8593 // it's legal. In Thumb2 mode, offset must be an immediate. 8594 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 8595 !Subtarget->isThumb2()) 8596 std::swap(Base, Offset); 8597 8598 // Post-indexed load / store update the base pointer. 8599 if (Ptr != Base) 8600 return false; 8601 } 8602 8603 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 8604 return true; 8605} 8606 8607void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 8608 const APInt &Mask, 8609 APInt &KnownZero, 8610 APInt &KnownOne, 8611 const SelectionDAG &DAG, 8612 unsigned Depth) const { 8613 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 8614 switch (Op.getOpcode()) { 8615 default: break; 8616 case ARMISD::CMOV: { 8617 // Bits are known zero/one if known on the LHS and RHS. 
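    // A CMOV produces one of its first two operands, so a bit is known only
    // if it is known, with the same value, on both sides; the known-bit sets
    // from the two operands are therefore intersected below.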
8618 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 8619 if (KnownZero == 0 && KnownOne == 0) return; 8620 8621 APInt KnownZeroRHS, KnownOneRHS; 8622 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 8623 KnownZeroRHS, KnownOneRHS, Depth+1); 8624 KnownZero &= KnownZeroRHS; 8625 KnownOne &= KnownOneRHS; 8626 return; 8627 } 8628 } 8629} 8630 8631//===----------------------------------------------------------------------===// 8632// ARM Inline Assembly Support 8633//===----------------------------------------------------------------------===// 8634 8635bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 8636 // Looking for "rev" which is V6+. 8637 if (!Subtarget->hasV6Ops()) 8638 return false; 8639 8640 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 8641 std::string AsmStr = IA->getAsmString(); 8642 SmallVector<StringRef, 4> AsmPieces; 8643 SplitString(AsmStr, AsmPieces, ";\n"); 8644 8645 switch (AsmPieces.size()) { 8646 default: return false; 8647 case 1: 8648 AsmStr = AsmPieces[0]; 8649 AsmPieces.clear(); 8650 SplitString(AsmStr, AsmPieces, " \t,"); 8651 8652 // rev $0, $1 8653 if (AsmPieces.size() == 3 && 8654 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 8655 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 8656 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 8657 if (Ty && Ty->getBitWidth() == 32) 8658 return IntrinsicLowering::LowerToByteSwap(CI); 8659 } 8660 break; 8661 } 8662 8663 return false; 8664} 8665 8666/// getConstraintType - Given a constraint letter, return the type of 8667/// constraint it is for this target. 8668ARMTargetLowering::ConstraintType 8669ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 8670 if (Constraint.size() == 1) { 8671 switch (Constraint[0]) { 8672 default: break; 8673 case 'l': return C_RegisterClass; 8674 case 'w': return C_RegisterClass; 8675 case 'h': return C_RegisterClass; 8676 case 'x': return C_RegisterClass; 8677 case 't': return C_RegisterClass; 8678 case 'j': return C_Other; // Constant for movw. 8679 // An address with a single base register. Due to the way we 8680 // currently handle addresses it is the same as an 'r' memory constraint. 8681 case 'Q': return C_Memory; 8682 } 8683 } else if (Constraint.size() == 2) { 8684 switch (Constraint[0]) { 8685 default: break; 8686 // All 'U+' constraints are addresses. 8687 case 'U': return C_Memory; 8688 } 8689 } 8690 return TargetLowering::getConstraintType(Constraint); 8691} 8692 8693/// Examine constraint type and operand type and determine a weight value. 8694/// This object must already have been set up with the operand type 8695/// and the current alternative constraint selected. 8696TargetLowering::ConstraintWeight 8697ARMTargetLowering::getSingleConstraintMatchWeight( 8698 AsmOperandInfo &info, const char *constraint) const { 8699 ConstraintWeight weight = CW_Invalid; 8700 Value *CallOperandVal = info.CallOperandVal; 8701 // If we don't have a value, we can't do a match, 8702 // but allow it at the lowest weight. 8703 if (CallOperandVal == NULL) 8704 return CW_Default; 8705 Type *type = CallOperandVal->getType(); 8706 // Look at the constraint type. 
8707 switch (*constraint) { 8708 default: 8709 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 8710 break; 8711 case 'l': 8712 if (type->isIntegerTy()) { 8713 if (Subtarget->isThumb()) 8714 weight = CW_SpecificReg; 8715 else 8716 weight = CW_Register; 8717 } 8718 break; 8719 case 'w': 8720 if (type->isFloatingPointTy()) 8721 weight = CW_Register; 8722 break; 8723 } 8724 return weight; 8725} 8726 8727typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 8728RCPair 8729ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 8730 EVT VT) const { 8731 if (Constraint.size() == 1) { 8732 // GCC ARM Constraint Letters 8733 switch (Constraint[0]) { 8734 case 'l': // Low regs or general regs. 8735 if (Subtarget->isThumb()) 8736 return RCPair(0U, ARM::tGPRRegisterClass); 8737 else 8738 return RCPair(0U, ARM::GPRRegisterClass); 8739 case 'h': // High regs or no regs. 8740 if (Subtarget->isThumb()) 8741 return RCPair(0U, ARM::hGPRRegisterClass); 8742 break; 8743 case 'r': 8744 return RCPair(0U, ARM::GPRRegisterClass); 8745 case 'w': 8746 if (VT == MVT::f32) 8747 return RCPair(0U, ARM::SPRRegisterClass); 8748 if (VT.getSizeInBits() == 64) 8749 return RCPair(0U, ARM::DPRRegisterClass); 8750 if (VT.getSizeInBits() == 128) 8751 return RCPair(0U, ARM::QPRRegisterClass); 8752 break; 8753 case 'x': 8754 if (VT == MVT::f32) 8755 return RCPair(0U, ARM::SPR_8RegisterClass); 8756 if (VT.getSizeInBits() == 64) 8757 return RCPair(0U, ARM::DPR_8RegisterClass); 8758 if (VT.getSizeInBits() == 128) 8759 return RCPair(0U, ARM::QPR_8RegisterClass); 8760 break; 8761 case 't': 8762 if (VT == MVT::f32) 8763 return RCPair(0U, ARM::SPRRegisterClass); 8764 break; 8765 } 8766 } 8767 if (StringRef("{cc}").equals_lower(Constraint)) 8768 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 8769 8770 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8771} 8772 8773/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8774/// vector. If it is invalid, don't add anything to Ops. 8775void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8776 std::string &Constraint, 8777 std::vector<SDValue>&Ops, 8778 SelectionDAG &DAG) const { 8779 SDValue Result(0, 0); 8780 8781 // Currently only support length 1 constraints. 8782 if (Constraint.length() != 1) return; 8783 8784 char ConstraintLetter = Constraint[0]; 8785 switch (ConstraintLetter) { 8786 default: break; 8787 case 'j': 8788 case 'I': case 'J': case 'K': case 'L': 8789 case 'M': case 'N': case 'O': 8790 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 8791 if (!C) 8792 return; 8793 8794 int64_t CVal64 = C->getSExtValue(); 8795 int CVal = (int) CVal64; 8796 // None of these constraints allow values larger than 32 bits. Check 8797 // that the value fits in an int. 8798 if (CVal != CVal64) 8799 return; 8800 8801 switch (ConstraintLetter) { 8802 case 'j': 8803 // Constant suitable for movw, must be between 0 and 8804 // 65535. 8805 if (Subtarget->hasV6T2Ops()) 8806 if (CVal >= 0 && CVal <= 65535) 8807 break; 8808 return; 8809 case 'I': 8810 if (Subtarget->isThumb1Only()) { 8811 // This must be a constant between 0 and 255, for ADD 8812 // immediates. 8813 if (CVal >= 0 && CVal <= 255) 8814 break; 8815 } else if (Subtarget->isThumb2()) { 8816 // A constant that can be used as an immediate value in a 8817 // data-processing instruction. 
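        // (Roughly, a Thumb-2 modified immediate: an 8-bit value optionally
        // rotated into position, or a byte replicated across the word, as
        // accepted by ARM_AM::getT2SOImmVal.)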
8818 if (ARM_AM::getT2SOImmVal(CVal) != -1) 8819 break; 8820 } else { 8821 // A constant that can be used as an immediate value in a 8822 // data-processing instruction. 8823 if (ARM_AM::getSOImmVal(CVal) != -1) 8824 break; 8825 } 8826 return; 8827 8828 case 'J': 8829 if (Subtarget->isThumb()) { // FIXME thumb2 8830 // This must be a constant between -255 and -1, for negated ADD 8831 // immediates. This can be used in GCC with an "n" modifier that 8832 // prints the negated value, for use with SUB instructions. It is 8833 // not useful otherwise but is implemented for compatibility. 8834 if (CVal >= -255 && CVal <= -1) 8835 break; 8836 } else { 8837 // This must be a constant between -4095 and 4095. It is not clear 8838 // what this constraint is intended for. Implemented for 8839 // compatibility with GCC. 8840 if (CVal >= -4095 && CVal <= 4095) 8841 break; 8842 } 8843 return; 8844 8845 case 'K': 8846 if (Subtarget->isThumb1Only()) { 8847 // A 32-bit value where only one byte has a nonzero value. Exclude 8848 // zero to match GCC. This constraint is used by GCC internally for 8849 // constants that can be loaded with a move/shift combination. 8850 // It is not useful otherwise but is implemented for compatibility. 8851 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 8852 break; 8853 } else if (Subtarget->isThumb2()) { 8854 // A constant whose bitwise inverse can be used as an immediate 8855 // value in a data-processing instruction. This can be used in GCC 8856 // with a "B" modifier that prints the inverted value, for use with 8857 // BIC and MVN instructions. It is not useful otherwise but is 8858 // implemented for compatibility. 8859 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 8860 break; 8861 } else { 8862 // A constant whose bitwise inverse can be used as an immediate 8863 // value in a data-processing instruction. This can be used in GCC 8864 // with a "B" modifier that prints the inverted value, for use with 8865 // BIC and MVN instructions. It is not useful otherwise but is 8866 // implemented for compatibility. 8867 if (ARM_AM::getSOImmVal(~CVal) != -1) 8868 break; 8869 } 8870 return; 8871 8872 case 'L': 8873 if (Subtarget->isThumb1Only()) { 8874 // This must be a constant between -7 and 7, 8875 // for 3-operand ADD/SUB immediate instructions. 8876 if (CVal >= -7 && CVal < 7) 8877 break; 8878 } else if (Subtarget->isThumb2()) { 8879 // A constant whose negation can be used as an immediate value in a 8880 // data-processing instruction. This can be used in GCC with an "n" 8881 // modifier that prints the negated value, for use with SUB 8882 // instructions. It is not useful otherwise but is implemented for 8883 // compatibility. 8884 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 8885 break; 8886 } else { 8887 // A constant whose negation can be used as an immediate value in a 8888 // data-processing instruction. This can be used in GCC with an "n" 8889 // modifier that prints the negated value, for use with SUB 8890 // instructions. It is not useful otherwise but is implemented for 8891 // compatibility. 8892 if (ARM_AM::getSOImmVal(-CVal) != -1) 8893 break; 8894 } 8895 return; 8896 8897 case 'M': 8898 if (Subtarget->isThumb()) { // FIXME thumb2 8899 // This must be a multiple of 4 between 0 and 1020, for 8900 // ADD sp + immediate. 8901 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 8902 break; 8903 } else { 8904 // A power of two or a constant between 0 and 32. 
This is used in 8905 // GCC for the shift amount on shifted register operands, but it is 8906 // useful in general for any shift amounts. 8907 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 8908 break; 8909 } 8910 return; 8911 8912 case 'N': 8913 if (Subtarget->isThumb()) { // FIXME thumb2 8914 // This must be a constant between 0 and 31, for shift amounts. 8915 if (CVal >= 0 && CVal <= 31) 8916 break; 8917 } 8918 return; 8919 8920 case 'O': 8921 if (Subtarget->isThumb()) { // FIXME thumb2 8922 // This must be a multiple of 4 between -508 and 508, for 8923 // ADD/SUB sp = sp + immediate. 8924 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 8925 break; 8926 } 8927 return; 8928 } 8929 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 8930 break; 8931 } 8932 8933 if (Result.getNode()) { 8934 Ops.push_back(Result); 8935 return; 8936 } 8937 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8938} 8939 8940bool 8941ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 8942 // The ARM target isn't yet aware of offsets. 8943 return false; 8944} 8945 8946bool ARM::isBitFieldInvertedMask(unsigned v) { 8947 if (v == 0xffffffff) 8948 return 0; 8949 // there can be 1's on either or both "outsides", all the "inside" 8950 // bits must be 0's 8951 unsigned int lsb = 0, msb = 31; 8952 while (v & (1 << msb)) --msb; 8953 while (v & (1 << lsb)) ++lsb; 8954 for (unsigned int i = lsb; i <= msb; ++i) { 8955 if (v & (1 << i)) 8956 return 0; 8957 } 8958 return 1; 8959} 8960 8961/// isFPImmLegal - Returns true if the target can instruction select the 8962/// specified FP immediate natively. If false, the legalizer will 8963/// materialize the FP immediate as a load from a constant pool. 8964bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 8965 if (!Subtarget->hasVFP3()) 8966 return false; 8967 if (VT == MVT::f32) 8968 return ARM_AM::getFP32Imm(Imm) != -1; 8969 if (VT == MVT::f64) 8970 return ARM_AM::getFP64Imm(Imm) != -1; 8971 return false; 8972} 8973 8974/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 8975/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 8976/// specified in the intrinsic calls. 8977bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 8978 const CallInst &I, 8979 unsigned Intrinsic) const { 8980 switch (Intrinsic) { 8981 case Intrinsic::arm_neon_vld1: 8982 case Intrinsic::arm_neon_vld2: 8983 case Intrinsic::arm_neon_vld3: 8984 case Intrinsic::arm_neon_vld4: 8985 case Intrinsic::arm_neon_vld2lane: 8986 case Intrinsic::arm_neon_vld3lane: 8987 case Intrinsic::arm_neon_vld4lane: { 8988 Info.opc = ISD::INTRINSIC_W_CHAIN; 8989 // Conservatively set memVT to the entire set of vectors loaded. 
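    // For example, a vld3 of <4 x i32> vectors reads 3 * 16 bytes, which is
    // modeled here as a v6i64 memory type.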
8990 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 8991 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8992 Info.ptrVal = I.getArgOperand(0); 8993 Info.offset = 0; 8994 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8995 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8996 Info.vol = false; // volatile loads with NEON intrinsics not supported 8997 Info.readMem = true; 8998 Info.writeMem = false; 8999 return true; 9000 } 9001 case Intrinsic::arm_neon_vst1: 9002 case Intrinsic::arm_neon_vst2: 9003 case Intrinsic::arm_neon_vst3: 9004 case Intrinsic::arm_neon_vst4: 9005 case Intrinsic::arm_neon_vst2lane: 9006 case Intrinsic::arm_neon_vst3lane: 9007 case Intrinsic::arm_neon_vst4lane: { 9008 Info.opc = ISD::INTRINSIC_VOID; 9009 // Conservatively set memVT to the entire set of vectors stored. 9010 unsigned NumElts = 0; 9011 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 9012 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 9013 if (!ArgTy->isVectorTy()) 9014 break; 9015 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 9016 } 9017 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 9018 Info.ptrVal = I.getArgOperand(0); 9019 Info.offset = 0; 9020 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 9021 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 9022 Info.vol = false; // volatile stores with NEON intrinsics not supported 9023 Info.readMem = false; 9024 Info.writeMem = true; 9025 return true; 9026 } 9027 case Intrinsic::arm_strexd: { 9028 Info.opc = ISD::INTRINSIC_W_CHAIN; 9029 Info.memVT = MVT::i64; 9030 Info.ptrVal = I.getArgOperand(2); 9031 Info.offset = 0; 9032 Info.align = 8; 9033 Info.vol = true; 9034 Info.readMem = false; 9035 Info.writeMem = true; 9036 return true; 9037 } 9038 case Intrinsic::arm_ldrexd: { 9039 Info.opc = ISD::INTRINSIC_W_CHAIN; 9040 Info.memVT = MVT::i64; 9041 Info.ptrVal = I.getArgOperand(0); 9042 Info.offset = 0; 9043 Info.align = 8; 9044 Info.vol = true; 9045 Info.readMem = true; 9046 Info.writeMem = false; 9047 return true; 9048 } 9049 default: 9050 break; 9051 } 9052 9053 return false; 9054} 9055