ARMISelLowering.cpp revision b52ba9f8a896b6717d6395ad59f6550e1fa475b0
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMISelLowering.h"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const uint16_t GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::QPRRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
    setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMSET, CallingConv::ARM_AAPCS);
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same applies to v4f32, but keep in mind that vadd, vsub, and vmul are
    // natively supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have
    // an FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
                  MVT::v4i16, MVT::v2i16,
                  MVT::v2i32};
    for (unsigned i = 0; i < 6; ++i) {
      setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // These just redirect to CTTZ and CTLZ on ARM.
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
      !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget->isTargetDarwin()) {
    // Non-Darwin platforms may return values in these registers via the
    // personality function.
    setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
    setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
    setExceptionPointerRegister(ARM::R0);
    setExceptionSelectorRegister(ARM::R1);
  }

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
  maxStoresPerMemset = 16;
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  // Prefer likely predicted branches to selects on out-of-order cores.
  predictableSelectIsExpensive = Subtarget->isLikeA9();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
  case ARMISD::VSHRN: return "ARMISD::VSHRN";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX: return "ARMISD::FMAX";
  case ARMISD::FMIN: return "ARMISD::FMIN";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fallthrough
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /*Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  DebugLoc &dl = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /*Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
1344 // These operations are automatically eliminated by the prolog/epilog pass 1345 if (!IsSibCall) 1346 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1347 1348 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1349 1350 RegsToPassVector RegsToPass; 1351 SmallVector<SDValue, 8> MemOpChains; 1352 1353 // Walk the register/memloc assignments, inserting copies/loads. In the case 1354 // of tail call optimization, arguments are handled later. 1355 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1356 i != e; 1357 ++i, ++realArgIdx) { 1358 CCValAssign &VA = ArgLocs[i]; 1359 SDValue Arg = OutVals[realArgIdx]; 1360 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1361 bool isByVal = Flags.isByVal(); 1362 1363 // Promote the value if needed. 1364 switch (VA.getLocInfo()) { 1365 default: llvm_unreachable("Unknown loc info!"); 1366 case CCValAssign::Full: break; 1367 case CCValAssign::SExt: 1368 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1369 break; 1370 case CCValAssign::ZExt: 1371 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1372 break; 1373 case CCValAssign::AExt: 1374 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1375 break; 1376 case CCValAssign::BCvt: 1377 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1378 break; 1379 } 1380 1381 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1382 if (VA.needsCustom()) { 1383 if (VA.getLocVT() == MVT::v2f64) { 1384 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1385 DAG.getConstant(0, MVT::i32)); 1386 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1387 DAG.getConstant(1, MVT::i32)); 1388 1389 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1390 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1391 1392 VA = ArgLocs[++i]; // skip ahead to next loc 1393 if (VA.isRegLoc()) { 1394 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1395 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1396 } else { 1397 assert(VA.isMemLoc()); 1398 1399 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1400 dl, DAG, VA, Flags)); 1401 } 1402 } else { 1403 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1404 StackPtr, MemOpChains, Flags); 1405 } 1406 } else if (VA.isRegLoc()) { 1407 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1408 } else if (isByVal) { 1409 assert(VA.isMemLoc()); 1410 unsigned offset = 0; 1411 1412 // True if this byval aggregate will be split between registers 1413 // and memory. 
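// When a split does occur, the register portion is reloaded from the byval
// pointer into the remaining argument registers below, and only the leftover
// bytes are copied onto the call frame with ARMISD::COPY_STRUCT_BYVAL.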
1414 if (CCInfo.isFirstByValRegValid()) { 1415 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1416 unsigned int i, j; 1417 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1418 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1419 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1420 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1421 MachinePointerInfo(), 1422 false, false, false, 0); 1423 MemOpChains.push_back(Load.getValue(1)); 1424 RegsToPass.push_back(std::make_pair(j, Load)); 1425 } 1426 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1427 CCInfo.clearFirstByValReg(); 1428 } 1429 1430 if (Flags.getByValSize() - 4*offset > 0) { 1431 unsigned LocMemOffset = VA.getLocMemOffset(); 1432 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1433 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1434 StkPtrOff); 1435 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1436 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1437 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1438 MVT::i32); 1439 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32); 1440 1441 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 1442 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; 1443 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, 1444 Ops, array_lengthof(Ops))); 1445 } 1446 } else if (!IsSibCall) { 1447 assert(VA.isMemLoc()); 1448 1449 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1450 dl, DAG, VA, Flags)); 1451 } 1452 } 1453 1454 if (!MemOpChains.empty()) 1455 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1456 &MemOpChains[0], MemOpChains.size()); 1457 1458 // Build a sequence of copy-to-reg nodes chained together with token chain 1459 // and flag operands which copy the outgoing args into the appropriate regs. 1460 SDValue InFlag; 1461 // Tail call byval lowering might overwrite argument registers so in case of 1462 // tail call optimization the copies to registers are lowered later. 1463 if (!isTailCall) 1464 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1465 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1466 RegsToPass[i].second, InFlag); 1467 InFlag = Chain.getValue(1); 1468 } 1469 1470 // For tail calls lower the arguments to the 'real' stack slot. 1471 if (isTailCall) { 1472 // Force all the incoming stack arguments to be loaded from the stack 1473 // before any new outgoing arguments are stored to the stack, because the 1474 // outgoing stack slots may alias the incoming argument stack slots, and 1475 // the alias isn't otherwise explicit. This is slightly more conservative 1476 // than necessary, because it means that each store effectively depends 1477 // on every argument instead of just those arguments it would clobber. 1478 1479 // Do not flag preceding copytoreg stuff together with the following stuff. 1480 InFlag = SDValue(); 1481 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1482 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1483 RegsToPass[i].second, InFlag); 1484 InFlag = Chain.getValue(1); 1485 } 1486 InFlag =SDValue(); 1487 } 1488 1489 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1490 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1491 // node so that legalize doesn't hack it. 
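// The isDirect/isARMFunc/isLocalARMFunc flags computed here also drive the
// call-opcode choice further down, e.g. ARMISD::CALL_PRED for a direct call
// to a local ARM function.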
1492 bool isDirect = false; 1493 bool isARMFunc = false; 1494 bool isLocalARMFunc = false; 1495 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1496 1497 if (EnableARMLongCalls) { 1498 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1499 && "long-calls with non-static relocation model!"); 1500 // Handle a global address or an external symbol. If it's not one of 1501 // those, the target's already in a register, so we don't need to do 1502 // anything extra. 1503 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1504 const GlobalValue *GV = G->getGlobal(); 1505 // Create a constant pool entry for the callee address 1506 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1507 ARMConstantPoolValue *CPV = 1508 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1509 1510 // Get the address of the callee into a register 1511 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1512 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1513 Callee = DAG.getLoad(getPointerTy(), dl, 1514 DAG.getEntryNode(), CPAddr, 1515 MachinePointerInfo::getConstantPool(), 1516 false, false, false, 0); 1517 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1518 const char *Sym = S->getSymbol(); 1519 1520 // Create a constant pool entry for the callee address 1521 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1522 ARMConstantPoolValue *CPV = 1523 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1524 ARMPCLabelIndex, 0); 1525 // Get the address of the callee into a register 1526 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1527 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1528 Callee = DAG.getLoad(getPointerTy(), dl, 1529 DAG.getEntryNode(), CPAddr, 1530 MachinePointerInfo::getConstantPool(), 1531 false, false, false, 0); 1532 } 1533 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1534 const GlobalValue *GV = G->getGlobal(); 1535 isDirect = true; 1536 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1537 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1538 getTargetMachine().getRelocationModel() != Reloc::Static; 1539 isARMFunc = !Subtarget->isThumb() || isStub; 1540 // ARM call to a local ARM function is predicable. 1541 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1542 // tBX takes a register source operand. 
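// On Thumb1 without v5T the callee address therefore has to be materialized
// via a constant-pool load (plus a PIC_ADD of the pic label) rather than as
// a plain TargetGlobalAddress operand.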
1543 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1544 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1545 ARMConstantPoolValue *CPV = 1546 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1547 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1548 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1549 Callee = DAG.getLoad(getPointerTy(), dl, 1550 DAG.getEntryNode(), CPAddr, 1551 MachinePointerInfo::getConstantPool(), 1552 false, false, false, 0); 1553 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1554 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1555 getPointerTy(), Callee, PICLabel); 1556 } else { 1557 // On ELF targets for PIC code, direct calls should go through the PLT 1558 unsigned OpFlags = 0; 1559 if (Subtarget->isTargetELF() && 1560 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1561 OpFlags = ARMII::MO_PLT; 1562 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1563 } 1564 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1565 isDirect = true; 1566 bool isStub = Subtarget->isTargetDarwin() && 1567 getTargetMachine().getRelocationModel() != Reloc::Static; 1568 isARMFunc = !Subtarget->isThumb() || isStub; 1569 // tBX takes a register source operand. 1570 const char *Sym = S->getSymbol(); 1571 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1572 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1573 ARMConstantPoolValue *CPV = 1574 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1575 ARMPCLabelIndex, 4); 1576 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1577 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1578 Callee = DAG.getLoad(getPointerTy(), dl, 1579 DAG.getEntryNode(), CPAddr, 1580 MachinePointerInfo::getConstantPool(), 1581 false, false, false, 0); 1582 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1583 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1584 getPointerTy(), Callee, PICLabel); 1585 } else { 1586 unsigned OpFlags = 0; 1587 // On ELF targets for PIC code, direct calls should go through the PLT 1588 if (Subtarget->isTargetELF() && 1589 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1590 OpFlags = ARMII::MO_PLT; 1591 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1592 } 1593 } 1594 1595 // FIXME: handle tail calls differently. 1596 unsigned CallOpc; 1597 if (Subtarget->isThumb()) { 1598 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1599 CallOpc = ARMISD::CALL_NOLINK; 1600 else if (doesNotRet && isDirect && !isARMFunc && 1601 Subtarget->hasRAS() && !Subtarget->isThumb1Only()) 1602 // "mov lr, pc; b _foo" to avoid confusing the RSP 1603 CallOpc = ARMISD::CALL_NOLINK; 1604 else 1605 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1606 } else { 1607 if (!isDirect && !Subtarget->hasV5TOps()) { 1608 CallOpc = ARMISD::CALL_NOLINK; 1609 } else if (doesNotRet && isDirect && Subtarget->hasRAS()) 1610 // "mov lr, pc; b _foo" to avoid confusing the RSP 1611 CallOpc = ARMISD::CALL_NOLINK; 1612 else 1613 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 1614 } 1615 1616 std::vector<SDValue> Ops; 1617 Ops.push_back(Chain); 1618 Ops.push_back(Callee); 1619 1620 // Add argument registers to the end of the list so that they are known live 1621 // into the call. 
1622 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1623 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1624 RegsToPass[i].second.getValueType())); 1625 1626 // Add a register mask operand representing the call-preserved registers. 1627 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 1628 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 1629 assert(Mask && "Missing call preserved mask for calling convention"); 1630 Ops.push_back(DAG.getRegisterMask(Mask)); 1631 1632 if (InFlag.getNode()) 1633 Ops.push_back(InFlag); 1634 1635 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1636 if (isTailCall) 1637 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1638 1639 // Returns a chain and a flag for retval copy to use. 1640 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1641 InFlag = Chain.getValue(1); 1642 1643 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1644 DAG.getIntPtrConstant(0, true), InFlag); 1645 if (!Ins.empty()) 1646 InFlag = Chain.getValue(1); 1647 1648 // Handle result values, copying them out of physregs into vregs that we 1649 // return. 1650 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1651 dl, DAG, InVals); 1652} 1653 1654/// HandleByVal - Every parameter *after* a byval parameter is passed 1655/// on the stack. Remember the next parameter register to allocate, 1656/// and then confiscate the rest of the parameter registers to insure 1657/// this. 1658void 1659ARMTargetLowering::HandleByVal( 1660 CCState *State, unsigned &size, unsigned Align) const { 1661 unsigned reg = State->AllocateReg(GPRArgRegs, 4); 1662 assert((State->getCallOrPrologue() == Prologue || 1663 State->getCallOrPrologue() == Call) && 1664 "unhandled ParmContext"); 1665 if ((!State->isFirstByValRegValid()) && 1666 (ARM::R0 <= reg) && (reg <= ARM::R3)) { 1667 if (Subtarget->isAAPCS_ABI() && Align > 4) { 1668 unsigned AlignInRegs = Align / 4; 1669 unsigned Waste = (ARM::R4 - reg) % AlignInRegs; 1670 for (unsigned i = 0; i < Waste; ++i) 1671 reg = State->AllocateReg(GPRArgRegs, 4); 1672 } 1673 if (reg != 0) { 1674 State->setFirstByValReg(reg); 1675 // At a call site, a byval parameter that is split between 1676 // registers and memory needs its size truncated here. In a 1677 // function prologue, such byval parameters are reassembled in 1678 // memory, and are not truncated. 1679 if (State->getCallOrPrologue() == Call) { 1680 unsigned excess = 4 * (ARM::R4 - reg); 1681 assert(size >= excess && "expected larger existing stack allocation"); 1682 size -= excess; 1683 } 1684 } 1685 } 1686 // Confiscate any remaining parameter registers to preclude their 1687 // assignment to subsequent parameters. 1688 while (State->AllocateReg(GPRArgRegs, 4)) 1689 ; 1690} 1691 1692/// MatchingStackOffset - Return true if the given stack call argument is 1693/// already available in the same position (relatively) of the caller's 1694/// incoming argument stack. 
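/// IsEligibleForTailCallOptimization uses this to decide whether a sibcall
/// can simply reuse the caller's fixed stack objects instead of copying the
/// argument again.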
1695static 1696bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1697 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1698 const TargetInstrInfo *TII) { 1699 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1700 int FI = INT_MAX; 1701 if (Arg.getOpcode() == ISD::CopyFromReg) { 1702 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1703 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1704 return false; 1705 MachineInstr *Def = MRI->getVRegDef(VR); 1706 if (!Def) 1707 return false; 1708 if (!Flags.isByVal()) { 1709 if (!TII->isLoadFromStackSlot(Def, FI)) 1710 return false; 1711 } else { 1712 return false; 1713 } 1714 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1715 if (Flags.isByVal()) 1716 // ByVal argument is passed in as a pointer but it's now being 1717 // dereferenced. e.g. 1718 // define @foo(%struct.X* %A) { 1719 // tail call @bar(%struct.X* byval %A) 1720 // } 1721 return false; 1722 SDValue Ptr = Ld->getBasePtr(); 1723 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1724 if (!FINode) 1725 return false; 1726 FI = FINode->getIndex(); 1727 } else 1728 return false; 1729 1730 assert(FI != INT_MAX); 1731 if (!MFI->isFixedObjectIndex(FI)) 1732 return false; 1733 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1734} 1735 1736/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1737/// for tail call optimization. Targets which want to do tail call 1738/// optimization should implement this function. 1739bool 1740ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1741 CallingConv::ID CalleeCC, 1742 bool isVarArg, 1743 bool isCalleeStructRet, 1744 bool isCallerStructRet, 1745 const SmallVectorImpl<ISD::OutputArg> &Outs, 1746 const SmallVectorImpl<SDValue> &OutVals, 1747 const SmallVectorImpl<ISD::InputArg> &Ins, 1748 SelectionDAG& DAG) const { 1749 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1750 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1751 bool CCMatch = CallerCC == CalleeCC; 1752 1753 // Look for obvious safe cases to perform tail call optimization that do not 1754 // require ABI changes. This is what gcc calls sibcall. 1755 1756 // Do not sibcall optimize vararg calls unless the call site is not passing 1757 // any arguments. 1758 if (isVarArg && !Outs.empty()) 1759 return false; 1760 1761 // Also avoid sibcall optimization if either caller or callee uses struct 1762 // return semantics. 1763 if (isCalleeStructRet || isCallerStructRet) 1764 return false; 1765 1766 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1767 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1768 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1769 // support in the assembler and linker to be used. This would need to be 1770 // fixed to fully support tail calls in Thumb1. 1771 // 1772 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1773 // LR. This means if we need to reload LR, it takes an extra instructions, 1774 // which outweighs the value of the tail call; but here we don't know yet 1775 // whether LR is going to be used. Probably the right approach is to 1776 // generate the tail call here and turn it back into CALL/RET in 1777 // emitEpilogue if LR is used. 
1778 1779 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1780 // but we need to make sure there are enough registers; the only valid 1781 // registers are the 4 used for parameters. We don't currently do this 1782 // case. 1783 if (Subtarget->isThumb1Only()) 1784 return false; 1785 1786 // If the calling conventions do not match, then we'd better make sure the 1787 // results are returned in the same way as what the caller expects. 1788 if (!CCMatch) { 1789 SmallVector<CCValAssign, 16> RVLocs1; 1790 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1791 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1792 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1793 1794 SmallVector<CCValAssign, 16> RVLocs2; 1795 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1796 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1797 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1798 1799 if (RVLocs1.size() != RVLocs2.size()) 1800 return false; 1801 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1802 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1803 return false; 1804 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1805 return false; 1806 if (RVLocs1[i].isRegLoc()) { 1807 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1808 return false; 1809 } else { 1810 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1811 return false; 1812 } 1813 } 1814 } 1815 1816 // If Caller's vararg or byval argument has been split between registers and 1817 // stack, do not perform tail call, since part of the argument is in caller's 1818 // local frame. 1819 const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction(). 1820 getInfo<ARMFunctionInfo>(); 1821 if (AFI_Caller->getVarArgsRegSaveSize()) 1822 return false; 1823 1824 // If the callee takes no arguments then go on to check the results of the 1825 // call. 1826 if (!Outs.empty()) { 1827 // Check if stack adjustment is needed. For now, do not do this if any 1828 // argument is passed on the stack. 1829 SmallVector<CCValAssign, 16> ArgLocs; 1830 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1831 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1832 CCInfo.AnalyzeCallOperands(Outs, 1833 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1834 if (CCInfo.getNextStackOffset()) { 1835 MachineFunction &MF = DAG.getMachineFunction(); 1836 1837 // Check if the arguments are already laid out in the right way as 1838 // the caller's fixed stack objects. 1839 MachineFrameInfo *MFI = MF.getFrameInfo(); 1840 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1841 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 1842 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1843 i != e; 1844 ++i, ++realArgIdx) { 1845 CCValAssign &VA = ArgLocs[i]; 1846 EVT RegVT = VA.getLocVT(); 1847 SDValue Arg = OutVals[realArgIdx]; 1848 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1849 if (VA.getLocInfo() == CCValAssign::Indirect) 1850 return false; 1851 if (VA.needsCustom()) { 1852 // f64 and vector types are split into multiple registers or 1853 // register/stack-slot combinations. The types will not match 1854 // the registers; give up on memory f64 refs until we figure 1855 // out what to do about this. 
1856 if (!VA.isRegLoc()) 1857 return false; 1858 if (!ArgLocs[++i].isRegLoc()) 1859 return false; 1860 if (RegVT == MVT::v2f64) { 1861 if (!ArgLocs[++i].isRegLoc()) 1862 return false; 1863 if (!ArgLocs[++i].isRegLoc()) 1864 return false; 1865 } 1866 } else if (!VA.isRegLoc()) { 1867 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1868 MFI, MRI, TII)) 1869 return false; 1870 } 1871 } 1872 } 1873 } 1874 1875 return true; 1876} 1877 1878SDValue 1879ARMTargetLowering::LowerReturn(SDValue Chain, 1880 CallingConv::ID CallConv, bool isVarArg, 1881 const SmallVectorImpl<ISD::OutputArg> &Outs, 1882 const SmallVectorImpl<SDValue> &OutVals, 1883 DebugLoc dl, SelectionDAG &DAG) const { 1884 1885 // CCValAssign - represent the assignment of the return value to a location. 1886 SmallVector<CCValAssign, 16> RVLocs; 1887 1888 // CCState - Info about the registers and stack slots. 1889 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1890 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1891 1892 // Analyze outgoing return values. 1893 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1894 isVarArg)); 1895 1896 // If this is the first return lowered for this function, add 1897 // the regs to the liveout set for the function. 1898 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1899 for (unsigned i = 0; i != RVLocs.size(); ++i) 1900 if (RVLocs[i].isRegLoc()) 1901 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1902 } 1903 1904 SDValue Flag; 1905 1906 // Copy the result values into the output registers. 1907 for (unsigned i = 0, realRVLocIdx = 0; 1908 i != RVLocs.size(); 1909 ++i, ++realRVLocIdx) { 1910 CCValAssign &VA = RVLocs[i]; 1911 assert(VA.isRegLoc() && "Can only return in registers!"); 1912 1913 SDValue Arg = OutVals[realRVLocIdx]; 1914 1915 switch (VA.getLocInfo()) { 1916 default: llvm_unreachable("Unknown loc info!"); 1917 case CCValAssign::Full: break; 1918 case CCValAssign::BCvt: 1919 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1920 break; 1921 } 1922 1923 if (VA.needsCustom()) { 1924 if (VA.getLocVT() == MVT::v2f64) { 1925 // Extract the first half and return it in two registers. 1926 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1927 DAG.getConstant(0, MVT::i32)); 1928 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1929 DAG.getVTList(MVT::i32, MVT::i32), Half); 1930 1931 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1932 Flag = Chain.getValue(1); 1933 VA = RVLocs[++i]; // skip ahead to next loc 1934 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1935 HalfGPRs.getValue(1), Flag); 1936 Flag = Chain.getValue(1); 1937 VA = RVLocs[++i]; // skip ahead to next loc 1938 1939 // Extract the 2nd half and fall through to handle it as an f64 value. 1940 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1941 DAG.getConstant(1, MVT::i32)); 1942 } 1943 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1944 // available. 
1945 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1946 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1947 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1948 Flag = Chain.getValue(1); 1949 VA = RVLocs[++i]; // skip ahead to next loc 1950 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1951 Flag); 1952 } else 1953 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1954 1955 // Guarantee that all emitted copies are 1956 // stuck together, avoiding something bad. 1957 Flag = Chain.getValue(1); 1958 } 1959 1960 SDValue result; 1961 if (Flag.getNode()) 1962 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1963 else // Return Void 1964 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1965 1966 return result; 1967} 1968 1969bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1970 if (N->getNumValues() != 1) 1971 return false; 1972 if (!N->hasNUsesOfValue(1, 0)) 1973 return false; 1974 1975 SDValue TCChain = Chain; 1976 SDNode *Copy = *N->use_begin(); 1977 if (Copy->getOpcode() == ISD::CopyToReg) { 1978 // If the copy has a glue operand, we conservatively assume it isn't safe to 1979 // perform a tail call. 1980 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1981 return false; 1982 TCChain = Copy->getOperand(0); 1983 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 1984 SDNode *VMov = Copy; 1985 // f64 returned in a pair of GPRs. 1986 SmallPtrSet<SDNode*, 2> Copies; 1987 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 1988 UI != UE; ++UI) { 1989 if (UI->getOpcode() != ISD::CopyToReg) 1990 return false; 1991 Copies.insert(*UI); 1992 } 1993 if (Copies.size() > 2) 1994 return false; 1995 1996 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 1997 UI != UE; ++UI) { 1998 SDValue UseChain = UI->getOperand(0); 1999 if (Copies.count(UseChain.getNode())) 2000 // Second CopyToReg 2001 Copy = *UI; 2002 else 2003 // First CopyToReg 2004 TCChain = UseChain; 2005 } 2006 } else if (Copy->getOpcode() == ISD::BITCAST) { 2007 // f32 returned in a single GPR. 2008 if (!Copy->hasOneUse()) 2009 return false; 2010 Copy = *Copy->use_begin(); 2011 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2012 return false; 2013 Chain = Copy->getOperand(0); 2014 } else { 2015 return false; 2016 } 2017 2018 bool HasRet = false; 2019 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2020 UI != UE; ++UI) { 2021 if (UI->getOpcode() != ARMISD::RET_FLAG) 2022 return false; 2023 HasRet = true; 2024 } 2025 2026 if (!HasRet) 2027 return false; 2028 2029 Chain = TCChain; 2030 return true; 2031} 2032 2033bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2034 if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) 2035 return false; 2036 2037 if (!CI->isTailCall()) 2038 return false; 2039 2040 return !Subtarget->isThumb1Only(); 2041} 2042 2043// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2044// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2045// one of the above mentioned nodes. It has to be wrapped because otherwise 2046// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2047// be used to form addressing mode. These wrapped nodes will be selected 2048// into MOVi. 
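// LowerConstantPool below is the simplest case: the ConstantPool node is
// replaced by its Target counterpart and wrapped in ARMISD::Wrapper.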
2049static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2050 EVT PtrVT = Op.getValueType(); 2051 // FIXME there is no actual debug info here 2052 DebugLoc dl = Op.getDebugLoc(); 2053 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2054 SDValue Res; 2055 if (CP->isMachineConstantPoolEntry()) 2056 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2057 CP->getAlignment()); 2058 else 2059 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2060 CP->getAlignment()); 2061 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2062} 2063 2064unsigned ARMTargetLowering::getJumpTableEncoding() const { 2065 return MachineJumpTableInfo::EK_Inline; 2066} 2067 2068SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2069 SelectionDAG &DAG) const { 2070 MachineFunction &MF = DAG.getMachineFunction(); 2071 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2072 unsigned ARMPCLabelIndex = 0; 2073 DebugLoc DL = Op.getDebugLoc(); 2074 EVT PtrVT = getPointerTy(); 2075 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2076 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2077 SDValue CPAddr; 2078 if (RelocM == Reloc::Static) { 2079 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2080 } else { 2081 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2082 ARMPCLabelIndex = AFI->createPICLabelUId(); 2083 ARMConstantPoolValue *CPV = 2084 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 2085 ARMCP::CPBlockAddress, PCAdj); 2086 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2087 } 2088 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 2089 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 2090 MachinePointerInfo::getConstantPool(), 2091 false, false, false, 0); 2092 if (RelocM == Reloc::Static) 2093 return Result; 2094 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2095 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 2096} 2097 2098// Lower ISD::GlobalTLSAddress using the "general dynamic" model 2099SDValue 2100ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2101 SelectionDAG &DAG) const { 2102 DebugLoc dl = GA->getDebugLoc(); 2103 EVT PtrVT = getPointerTy(); 2104 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2105 MachineFunction &MF = DAG.getMachineFunction(); 2106 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2107 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2108 ARMConstantPoolValue *CPV = 2109 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2110 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2111 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2112 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2113 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2114 MachinePointerInfo::getConstantPool(), 2115 false, false, false, 0); 2116 SDValue Chain = Argument.getValue(1); 2117 2118 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2119 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2120 2121 // call __tls_get_addr. 2122 ArgListTy Args; 2123 ArgListEntry Entry; 2124 Entry.Node = Argument; 2125 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2126 Args.push_back(Entry); 2127 // FIXME: is there useful debug info available here? 
2128 TargetLowering::CallLoweringInfo CLI(Chain, 2129 (Type *) Type::getInt32Ty(*DAG.getContext()), 2130 false, false, false, false, 2131 0, CallingConv::C, /*isTailCall=*/false, 2132 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 2133 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2134 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2135 return CallResult.first; 2136} 2137 2138// Lower ISD::GlobalTLSAddress using the "initial exec" or 2139// "local exec" model. 2140SDValue 2141ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2142 SelectionDAG &DAG, 2143 TLSModel::Model model) const { 2144 const GlobalValue *GV = GA->getGlobal(); 2145 DebugLoc dl = GA->getDebugLoc(); 2146 SDValue Offset; 2147 SDValue Chain = DAG.getEntryNode(); 2148 EVT PtrVT = getPointerTy(); 2149 // Get the Thread Pointer 2150 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2151 2152 if (model == TLSModel::InitialExec) { 2153 MachineFunction &MF = DAG.getMachineFunction(); 2154 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2155 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2156 // Initial exec model. 2157 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2158 ARMConstantPoolValue *CPV = 2159 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2160 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2161 true); 2162 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2163 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2164 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2165 MachinePointerInfo::getConstantPool(), 2166 false, false, false, 0); 2167 Chain = Offset.getValue(1); 2168 2169 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2170 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2171 2172 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2173 MachinePointerInfo::getConstantPool(), 2174 false, false, false, 0); 2175 } else { 2176 // local exec model 2177 assert(model == TLSModel::LocalExec); 2178 ARMConstantPoolValue *CPV = 2179 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2180 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2181 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2182 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2183 MachinePointerInfo::getConstantPool(), 2184 false, false, false, 0); 2185 } 2186 2187 // The address of the thread local variable is the add of the thread 2188 // pointer with the offset of the variable. 
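// For initial-exec the offset was itself loaded indirectly through the
// GOTTPOFF constant-pool entry (one extra load); for local-exec the TPOFF
// value comes straight from the constant pool.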
2189 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2190} 2191 2192SDValue 2193ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2194 // TODO: implement the "local dynamic" model 2195 assert(Subtarget->isTargetELF() && 2196 "TLS not implemented for non-ELF targets"); 2197 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2198 2199 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2200 2201 switch (model) { 2202 case TLSModel::GeneralDynamic: 2203 case TLSModel::LocalDynamic: 2204 return LowerToTLSGeneralDynamicModel(GA, DAG); 2205 case TLSModel::InitialExec: 2206 case TLSModel::LocalExec: 2207 return LowerToTLSExecModels(GA, DAG, model); 2208 } 2209 llvm_unreachable("bogus TLS model"); 2210} 2211 2212SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2213 SelectionDAG &DAG) const { 2214 EVT PtrVT = getPointerTy(); 2215 DebugLoc dl = Op.getDebugLoc(); 2216 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2217 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2218 if (RelocM == Reloc::PIC_) { 2219 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2220 ARMConstantPoolValue *CPV = 2221 ARMConstantPoolConstant::Create(GV, 2222 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2223 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2224 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2225 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2226 CPAddr, 2227 MachinePointerInfo::getConstantPool(), 2228 false, false, false, 0); 2229 SDValue Chain = Result.getValue(1); 2230 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2231 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2232 if (!UseGOTOFF) 2233 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2234 MachinePointerInfo::getGOT(), 2235 false, false, false, 0); 2236 return Result; 2237 } 2238 2239 // If we have T2 ops, we can materialize the address directly via movt/movw 2240 // pair. This is always cheaper. 2241 if (Subtarget->useMovt()) { 2242 ++NumMovwMovt; 2243 // FIXME: Once remat is capable of dealing with instructions with register 2244 // operands, expand this into two nodes. 2245 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2246 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2247 } else { 2248 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2249 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2250 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2251 MachinePointerInfo::getConstantPool(), 2252 false, false, false, 0); 2253 } 2254} 2255 2256SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2257 SelectionDAG &DAG) const { 2258 EVT PtrVT = getPointerTy(); 2259 DebugLoc dl = Op.getDebugLoc(); 2260 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2261 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2262 MachineFunction &MF = DAG.getMachineFunction(); 2263 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2264 2265 // FIXME: Enable this for static codegen when tool issues are fixed. Also 2266 // update ARMFastISel::ARMMaterializeGV. 2267 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2268 ++NumMovwMovt; 2269 // FIXME: Once remat is capable of dealing with instructions with register 2270 // operands, expand this into two nodes. 
2271 if (RelocM == Reloc::Static) 2272 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2273 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2274 2275 unsigned Wrapper = (RelocM == Reloc::PIC_) 2276 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2277 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2278 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2279 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2280 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2281 MachinePointerInfo::getGOT(), 2282 false, false, false, 0); 2283 return Result; 2284 } 2285 2286 unsigned ARMPCLabelIndex = 0; 2287 SDValue CPAddr; 2288 if (RelocM == Reloc::Static) { 2289 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2290 } else { 2291 ARMPCLabelIndex = AFI->createPICLabelUId(); 2292 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2293 ARMConstantPoolValue *CPV = 2294 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2295 PCAdj); 2296 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2297 } 2298 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2299 2300 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2301 MachinePointerInfo::getConstantPool(), 2302 false, false, false, 0); 2303 SDValue Chain = Result.getValue(1); 2304 2305 if (RelocM == Reloc::PIC_) { 2306 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2307 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2308 } 2309 2310 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2311 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2312 false, false, false, 0); 2313 2314 return Result; 2315} 2316 2317SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2318 SelectionDAG &DAG) const { 2319 assert(Subtarget->isTargetELF() && 2320 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2321 MachineFunction &MF = DAG.getMachineFunction(); 2322 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2323 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2324 EVT PtrVT = getPointerTy(); 2325 DebugLoc dl = Op.getDebugLoc(); 2326 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2327 ARMConstantPoolValue *CPV = 2328 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2329 ARMPCLabelIndex, PCAdj); 2330 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2331 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2332 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2333 MachinePointerInfo::getConstantPool(), 2334 false, false, false, 0); 2335 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2336 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2337} 2338 2339SDValue 2340ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2341 DebugLoc dl = Op.getDebugLoc(); 2342 SDValue Val = DAG.getConstant(0, MVT::i32); 2343 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2344 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2345 Op.getOperand(1), Val); 2346} 2347 2348SDValue 2349ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2350 DebugLoc dl = Op.getDebugLoc(); 2351 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2352 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2353} 2354 2355SDValue 2356ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2357 const ARMSubtarget *Subtarget) const { 2358 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2359 DebugLoc dl = Op.getDebugLoc(); 2360 switch (IntNo) { 2361 default: return SDValue(); // Don't custom lower most intrinsics. 2362 case Intrinsic::arm_thread_pointer: { 2363 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2364 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2365 } 2366 case Intrinsic::eh_sjlj_lsda: { 2367 MachineFunction &MF = DAG.getMachineFunction(); 2368 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2369 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2370 EVT PtrVT = getPointerTy(); 2371 DebugLoc dl = Op.getDebugLoc(); 2372 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2373 SDValue CPAddr; 2374 unsigned PCAdj = (RelocM != Reloc::PIC_) 2375 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2376 ARMConstantPoolValue *CPV = 2377 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2378 ARMCP::CPLSDA, PCAdj); 2379 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2380 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2381 SDValue Result = 2382 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2383 MachinePointerInfo::getConstantPool(), 2384 false, false, false, 0); 2385 2386 if (RelocM == Reloc::PIC_) { 2387 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2388 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2389 } 2390 return Result; 2391 } 2392 case Intrinsic::arm_neon_vmulls: 2393 case Intrinsic::arm_neon_vmullu: { 2394 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2395 ? ARMISD::VMULLs : ARMISD::VMULLu; 2396 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2397 Op.getOperand(1), Op.getOperand(2)); 2398 } 2399 } 2400} 2401 2402static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2403 const ARMSubtarget *Subtarget) { 2404 DebugLoc dl = Op.getDebugLoc(); 2405 if (!Subtarget->hasDataBarrier()) { 2406 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2407 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2408 // here. 2409 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2410 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!"); 2411 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2412 DAG.getConstant(0, MVT::i32)); 2413 } 2414 2415 SDValue Op5 = Op.getOperand(5); 2416 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2417 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2418 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2419 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2420 2421 ARM_MB::MemBOpt DMBOpt; 2422 if (isDeviceBarrier) 2423 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2424 else 2425 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2426 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2427 DAG.getConstant(DMBOpt, MVT::i32)); 2428} 2429 2430 2431static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2432 const ARMSubtarget *Subtarget) { 2433 // FIXME: handle "fence singlethread" more efficiently. 2434 DebugLoc dl = Op.getDebugLoc(); 2435 if (!Subtarget->hasDataBarrier()) { 2436 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2437 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2438 // here. 2439 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2440 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2441 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2442 DAG.getConstant(0, MVT::i32)); 2443 } 2444 2445 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2446 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2447} 2448 2449static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2450 const ARMSubtarget *Subtarget) { 2451 // ARM pre v5TE and Thumb1 does not have preload instructions. 2452 if (!(Subtarget->isThumb2() || 2453 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2454 // Just preserve the chain. 2455 return Op.getOperand(0); 2456 2457 DebugLoc dl = Op.getDebugLoc(); 2458 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2459 if (!isRead && 2460 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2461 // ARMv7 with MP extension has PLDW. 2462 return Op.getOperand(0); 2463 2464 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2465 if (Subtarget->isThumb()) { 2466 // Invert the bits. 2467 isRead = ~isRead & 1; 2468 isData = ~isData & 1; 2469 } 2470 2471 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2472 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2473 DAG.getConstant(isData, MVT::i32)); 2474} 2475 2476static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2477 MachineFunction &MF = DAG.getMachineFunction(); 2478 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2479 2480 // vastart just stores the address of the VarArgsFrameIndex slot into the 2481 // memory location argument. 
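// The frame index was set up by VarArgStyleRegisters: it points at the
// spilled argument-register area when some of r0-r3 had to be saved, or at
// the next stack-passed argument otherwise.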
2482 DebugLoc dl = Op.getDebugLoc(); 2483 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2484 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2485 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2486 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2487 MachinePointerInfo(SV), false, false, 0); 2488} 2489 2490SDValue 2491ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2492 SDValue &Root, SelectionDAG &DAG, 2493 DebugLoc dl) const { 2494 MachineFunction &MF = DAG.getMachineFunction(); 2495 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2496 2497 const TargetRegisterClass *RC; 2498 if (AFI->isThumb1OnlyFunction()) 2499 RC = &ARM::tGPRRegClass; 2500 else 2501 RC = &ARM::GPRRegClass; 2502 2503 // Transform the arguments stored in physical registers into virtual ones. 2504 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2505 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2506 2507 SDValue ArgValue2; 2508 if (NextVA.isMemLoc()) { 2509 MachineFrameInfo *MFI = MF.getFrameInfo(); 2510 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2511 2512 // Create load node to retrieve arguments from the stack. 2513 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2514 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2515 MachinePointerInfo::getFixedStack(FI), 2516 false, false, false, 0); 2517 } else { 2518 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2519 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2520 } 2521 2522 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2523} 2524 2525void 2526ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, 2527 unsigned &VARegSize, unsigned &VARegSaveSize) 2528 const { 2529 unsigned NumGPRs; 2530 if (CCInfo.isFirstByValRegValid()) 2531 NumGPRs = ARM::R4 - CCInfo.getFirstByValReg(); 2532 else { 2533 unsigned int firstUnalloced; 2534 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs, 2535 sizeof(GPRArgRegs) / 2536 sizeof(GPRArgRegs[0])); 2537 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0; 2538 } 2539 2540 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2541 VARegSize = NumGPRs * 4; 2542 VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2543} 2544 2545// The remaining GPRs hold either the beginning of variable-argument 2546// data, or the beginning of an aggregate passed by value (usuall 2547// byval). Either way, we allocate stack slots adjacent to the data 2548// provided by our caller, and store the unallocated registers there. 2549// If this is a variadic function, the va_list pointer will begin with 2550// these values; otherwise, this reassembles a (byval) structure that 2551// was split between registers and memory. 
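// For example, a variadic function with a single fixed i32 argument still
// has r1-r3 unallocated; those three registers are stored to a fixed stack
// object adjacent to the caller-provided stack arguments so that va_arg can
// walk every vararg contiguously.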
2552void 2553ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2554 DebugLoc dl, SDValue &Chain, 2555 const Value *OrigArg, 2556 unsigned OffsetFromOrigArg, 2557 unsigned ArgOffset) const { 2558 MachineFunction &MF = DAG.getMachineFunction(); 2559 MachineFrameInfo *MFI = MF.getFrameInfo(); 2560 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2561 unsigned firstRegToSaveIndex; 2562 if (CCInfo.isFirstByValRegValid()) 2563 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2564 else { 2565 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2566 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2567 } 2568 2569 unsigned VARegSize, VARegSaveSize; 2570 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2571 if (VARegSaveSize) { 2572 // If this function is vararg, store any remaining integer argument regs 2573 // to their spots on the stack so that they may be loaded by deferencing 2574 // the result of va_next. 2575 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2576 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2577 ArgOffset + VARegSaveSize 2578 - VARegSize, 2579 false)); 2580 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2581 getPointerTy()); 2582 2583 SmallVector<SDValue, 4> MemOps; 2584 for (unsigned i = 0; firstRegToSaveIndex < 4; ++firstRegToSaveIndex, ++i) { 2585 const TargetRegisterClass *RC; 2586 if (AFI->isThumb1OnlyFunction()) 2587 RC = &ARM::tGPRRegClass; 2588 else 2589 RC = &ARM::GPRRegClass; 2590 2591 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2592 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2593 SDValue Store = 2594 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2595 MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i), 2596 false, false, 0); 2597 MemOps.push_back(Store); 2598 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2599 DAG.getConstant(4, getPointerTy())); 2600 } 2601 if (!MemOps.empty()) 2602 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2603 &MemOps[0], MemOps.size()); 2604 } else 2605 // This will point to the next argument passed via stack. 2606 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2607} 2608 2609SDValue 2610ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2611 CallingConv::ID CallConv, bool isVarArg, 2612 const SmallVectorImpl<ISD::InputArg> 2613 &Ins, 2614 DebugLoc dl, SelectionDAG &DAG, 2615 SmallVectorImpl<SDValue> &InVals) 2616 const { 2617 MachineFunction &MF = DAG.getMachineFunction(); 2618 MachineFrameInfo *MFI = MF.getFrameInfo(); 2619 2620 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2621 2622 // Assign locations to all of the incoming arguments. 2623 SmallVector<CCValAssign, 16> ArgLocs; 2624 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2625 getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue); 2626 CCInfo.AnalyzeFormalArguments(Ins, 2627 CCAssignFnForNode(CallConv, /* Return*/ false, 2628 isVarArg)); 2629 2630 SmallVector<SDValue, 16> ArgValues; 2631 int lastInsIndex = -1; 2632 SDValue ArgValue; 2633 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin(); 2634 unsigned CurArgIdx = 0; 2635 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2636 CCValAssign &VA = ArgLocs[i]; 2637 std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx); 2638 CurArgIdx = Ins[VA.getValNo()].OrigArgIndex; 2639 // Arguments stored in registers. 
2640 if (VA.isRegLoc()) { 2641 EVT RegVT = VA.getLocVT(); 2642 2643 if (VA.needsCustom()) { 2644 // f64 and vector types are split up into multiple registers or 2645 // combinations of registers and stack slots. 2646 if (VA.getLocVT() == MVT::v2f64) { 2647 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2648 Chain, DAG, dl); 2649 VA = ArgLocs[++i]; // skip ahead to next loc 2650 SDValue ArgValue2; 2651 if (VA.isMemLoc()) { 2652 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2653 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2654 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2655 MachinePointerInfo::getFixedStack(FI), 2656 false, false, false, 0); 2657 } else { 2658 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2659 Chain, DAG, dl); 2660 } 2661 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2662 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2663 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2664 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2665 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2666 } else 2667 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2668 2669 } else { 2670 const TargetRegisterClass *RC; 2671 2672 if (RegVT == MVT::f32) 2673 RC = &ARM::SPRRegClass; 2674 else if (RegVT == MVT::f64) 2675 RC = &ARM::DPRRegClass; 2676 else if (RegVT == MVT::v2f64) 2677 RC = &ARM::QPRRegClass; 2678 else if (RegVT == MVT::i32) 2679 RC = AFI->isThumb1OnlyFunction() ? 2680 (const TargetRegisterClass*)&ARM::tGPRRegClass : 2681 (const TargetRegisterClass*)&ARM::GPRRegClass; 2682 else 2683 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2684 2685 // Transform the arguments in physical registers into virtual ones. 2686 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2687 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2688 } 2689 2690 // If this is an 8 or 16-bit value, it is really passed promoted 2691 // to 32 bits. Insert an assert[sz]ext to capture this, then 2692 // truncate to the right size. 2693 switch (VA.getLocInfo()) { 2694 default: llvm_unreachable("Unknown loc info!"); 2695 case CCValAssign::Full: break; 2696 case CCValAssign::BCvt: 2697 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2698 break; 2699 case CCValAssign::SExt: 2700 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2701 DAG.getValueType(VA.getValVT())); 2702 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2703 break; 2704 case CCValAssign::ZExt: 2705 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2706 DAG.getValueType(VA.getValVT())); 2707 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2708 break; 2709 } 2710 2711 InVals.push_back(ArgValue); 2712 2713 } else { // VA.isRegLoc() 2714 2715 // sanity check 2716 assert(VA.isMemLoc()); 2717 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2718 2719 int index = ArgLocs[i].getValNo(); 2720 2721 // Some Ins[] entries become multiple ArgLoc[] entries. 2722 // Process them only once. 2723 if (index != lastInsIndex) 2724 { 2725 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2726 // FIXME: For now, all byval parameter objects are marked mutable. 2727 // This can be changed with more analysis. 2728 // In case of tail call optimization mark all arguments mutable. 2729 // Since they could be overwritten by lowering of arguments in case of 2730 // a tail call. 
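// A byval argument only gets a fixed object for the bytes that actually live
// on the stack; any register portion has just been re-stored next to it by
// VarArgStyleRegisters. Non-byval stack arguments are simply reloaded from
// their fixed stack slot.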
2731 if (Flags.isByVal()) { 2732 unsigned VARegSize, VARegSaveSize; 2733 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2734 VarArgStyleRegisters(CCInfo, DAG, 2735 dl, Chain, CurOrigArg, Ins[VA.getValNo()].PartOffset, 0); 2736 unsigned Bytes = Flags.getByValSize() - VARegSize; 2737 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 2738 int FI = MFI->CreateFixedObject(Bytes, 2739 VA.getLocMemOffset(), false); 2740 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2741 } else { 2742 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2743 VA.getLocMemOffset(), true); 2744 2745 // Create load nodes to retrieve arguments from the stack. 2746 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2747 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2748 MachinePointerInfo::getFixedStack(FI), 2749 false, false, false, 0)); 2750 } 2751 lastInsIndex = index; 2752 } 2753 } 2754 } 2755 2756 // varargs 2757 if (isVarArg) 2758 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0, 0, 2759 CCInfo.getNextStackOffset()); 2760 2761 return Chain; 2762} 2763 2764/// isFloatingPointZero - Return true if this is +0.0. 2765static bool isFloatingPointZero(SDValue Op) { 2766 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2767 return CFP->getValueAPF().isPosZero(); 2768 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2769 // Maybe this has already been legalized into the constant pool? 2770 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2771 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2772 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2773 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2774 return CFP->getValueAPF().isPosZero(); 2775 } 2776 } 2777 return false; 2778} 2779 2780/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2781/// the given operands. 2782SDValue 2783ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2784 SDValue &ARMcc, SelectionDAG &DAG, 2785 DebugLoc dl) const { 2786 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2787 unsigned C = RHSC->getZExtValue(); 2788 if (!isLegalICmpImmediate(C)) { 2789 // Constant does not fit, try adjusting it by one? 2790 switch (CC) { 2791 default: break; 2792 case ISD::SETLT: 2793 case ISD::SETGE: 2794 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2795 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2796 RHS = DAG.getConstant(C-1, MVT::i32); 2797 } 2798 break; 2799 case ISD::SETULT: 2800 case ISD::SETUGE: 2801 if (C != 0 && isLegalICmpImmediate(C-1)) { 2802 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2803 RHS = DAG.getConstant(C-1, MVT::i32); 2804 } 2805 break; 2806 case ISD::SETLE: 2807 case ISD::SETGT: 2808 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2809 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2810 RHS = DAG.getConstant(C+1, MVT::i32); 2811 } 2812 break; 2813 case ISD::SETULE: 2814 case ISD::SETUGT: 2815 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2816 CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 2817 RHS = DAG.getConstant(C+1, MVT::i32); 2818 } 2819 break; 2820 } 2821 } 2822 } 2823 2824 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2825 ARMISD::NodeType CompareType; 2826 switch (CondCode) { 2827 default: 2828 CompareType = ARMISD::CMP; 2829 break; 2830 case ARMCC::EQ: 2831 case ARMCC::NE: 2832 // Uses only Z Flag 2833 CompareType = ARMISD::CMPZ; 2834 break; 2835 } 2836 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2837 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2838} 2839 2840/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 2841SDValue 2842ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2843 DebugLoc dl) const { 2844 SDValue Cmp; 2845 if (!isFloatingPointZero(RHS)) 2846 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2847 else 2848 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2849 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2850} 2851 2852/// duplicateCmp - Glue values can have only one use, so this function 2853/// duplicates a comparison node. 2854SDValue 2855ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2856 unsigned Opc = Cmp.getOpcode(); 2857 DebugLoc DL = Cmp.getDebugLoc(); 2858 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2859 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2860 2861 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2862 Cmp = Cmp.getOperand(0); 2863 Opc = Cmp.getOpcode(); 2864 if (Opc == ARMISD::CMPFP) 2865 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2866 else { 2867 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2868 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2869 } 2870 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2871} 2872 2873SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2874 SDValue Cond = Op.getOperand(0); 2875 SDValue SelectTrue = Op.getOperand(1); 2876 SDValue SelectFalse = Op.getOperand(2); 2877 DebugLoc dl = Op.getDebugLoc(); 2878 2879 // Convert: 2880 // 2881 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2882 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2883 // 2884 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2885 const ConstantSDNode *CMOVTrue = 2886 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2887 const ConstantSDNode *CMOVFalse = 2888 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2889 2890 if (CMOVTrue && CMOVFalse) { 2891 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2892 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2893 2894 SDValue True; 2895 SDValue False; 2896 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2897 True = SelectTrue; 2898 False = SelectFalse; 2899 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2900 True = SelectFalse; 2901 False = SelectTrue; 2902 } 2903 2904 if (True.getNode() && False.getNode()) { 2905 EVT VT = Op.getValueType(); 2906 SDValue ARMcc = Cond.getOperand(2); 2907 SDValue CCR = Cond.getOperand(3); 2908 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2909 assert(True.getValueType() == VT); 2910 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2911 } 2912 } 2913 } 2914 2915 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 2916 // undefined bits before doing a full-word comparison with zero.
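  // For example, a boolean materialized as 0x00000002 has its only defined bit
  // (bit 0) clear; without the AND below, the SETNE comparison against zero
  // would still treat it as true.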
2917 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 2918 DAG.getConstant(1, Cond.getValueType())); 2919 2920 return DAG.getSelectCC(dl, Cond, 2921 DAG.getConstant(0, Cond.getValueType()), 2922 SelectTrue, SelectFalse, ISD::SETNE); 2923} 2924 2925SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2926 EVT VT = Op.getValueType(); 2927 SDValue LHS = Op.getOperand(0); 2928 SDValue RHS = Op.getOperand(1); 2929 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2930 SDValue TrueVal = Op.getOperand(2); 2931 SDValue FalseVal = Op.getOperand(3); 2932 DebugLoc dl = Op.getDebugLoc(); 2933 2934 if (LHS.getValueType() == MVT::i32) { 2935 SDValue ARMcc; 2936 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2937 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2938 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2939 } 2940 2941 ARMCC::CondCodes CondCode, CondCode2; 2942 FPCCToARMCC(CC, CondCode, CondCode2); 2943 2944 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2945 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2946 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2947 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2948 ARMcc, CCR, Cmp); 2949 if (CondCode2 != ARMCC::AL) { 2950 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2951 // FIXME: Needs another CMP because flag can have but one use. 2952 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2953 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2954 Result, TrueVal, ARMcc2, CCR, Cmp2); 2955 } 2956 return Result; 2957} 2958 2959/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2960/// to morph to an integer compare sequence. 2961static bool canChangeToInt(SDValue Op, bool &SeenZero, 2962 const ARMSubtarget *Subtarget) { 2963 SDNode *N = Op.getNode(); 2964 if (!N->hasOneUse()) 2965 // Otherwise it requires moving the value from fp to integer registers. 2966 return false; 2967 if (!N->getNumValues()) 2968 return false; 2969 EVT VT = Op.getValueType(); 2970 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2971 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2972 // vmrs are very slow, e.g. cortex-a8. 
2973 return false; 2974 2975 if (isFloatingPointZero(Op)) { 2976 SeenZero = true; 2977 return true; 2978 } 2979 return ISD::isNormalLoad(N); 2980} 2981 2982static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2983 if (isFloatingPointZero(Op)) 2984 return DAG.getConstant(0, MVT::i32); 2985 2986 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2987 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2988 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2989 Ld->isVolatile(), Ld->isNonTemporal(), 2990 Ld->isInvariant(), Ld->getAlignment()); 2991 2992 llvm_unreachable("Unknown VFP cmp argument!"); 2993} 2994 2995static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2996 SDValue &RetVal1, SDValue &RetVal2) { 2997 if (isFloatingPointZero(Op)) { 2998 RetVal1 = DAG.getConstant(0, MVT::i32); 2999 RetVal2 = DAG.getConstant(0, MVT::i32); 3000 return; 3001 } 3002 3003 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3004 SDValue Ptr = Ld->getBasePtr(); 3005 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3006 Ld->getChain(), Ptr, 3007 Ld->getPointerInfo(), 3008 Ld->isVolatile(), Ld->isNonTemporal(), 3009 Ld->isInvariant(), Ld->getAlignment()); 3010 3011 EVT PtrType = Ptr.getValueType(); 3012 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3013 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 3014 PtrType, Ptr, DAG.getConstant(4, PtrType)); 3015 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3016 Ld->getChain(), NewPtr, 3017 Ld->getPointerInfo().getWithOffset(4), 3018 Ld->isVolatile(), Ld->isNonTemporal(), 3019 Ld->isInvariant(), NewAlign); 3020 return; 3021 } 3022 3023 llvm_unreachable("Unknown VFP cmp argument!"); 3024} 3025 3026/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3027/// f32 and even f64 comparisons to integer ones. 3028SDValue 3029ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3030 SDValue Chain = Op.getOperand(0); 3031 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3032 SDValue LHS = Op.getOperand(2); 3033 SDValue RHS = Op.getOperand(3); 3034 SDValue Dest = Op.getOperand(4); 3035 DebugLoc dl = Op.getDebugLoc(); 3036 3037 bool LHSSeenZero = false; 3038 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3039 bool RHSSeenZero = false; 3040 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3041 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3042 // If unsafe fp math optimization is enabled and there are no other uses of 3043 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3044 // to an integer comparison. 
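    // The SeenZero requirement means one side is a floating-point +/-0.0. After
    // clearing the sign bit with 0x7fffffff, an operand's integer image is zero
    // exactly when the value is +0.0 or -0.0, so the FP equality against zero
    // becomes a plain integer CMP (the f64 case compares two words via BCC_i64).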
3045 if (CC == ISD::SETOEQ) 3046 CC = ISD::SETEQ; 3047 else if (CC == ISD::SETUNE) 3048 CC = ISD::SETNE; 3049 3050 SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32); 3051 SDValue ARMcc; 3052 if (LHS.getValueType() == MVT::f32) { 3053 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3054 bitcastf32Toi32(LHS, DAG), Mask); 3055 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3056 bitcastf32Toi32(RHS, DAG), Mask); 3057 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3058 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3059 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3060 Chain, Dest, ARMcc, CCR, Cmp); 3061 } 3062 3063 SDValue LHS1, LHS2; 3064 SDValue RHS1, RHS2; 3065 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3066 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3067 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3068 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3069 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3070 ARMcc = DAG.getConstant(CondCode, MVT::i32); 3071 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3072 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3073 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 3074 } 3075 3076 return SDValue(); 3077} 3078 3079SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3080 SDValue Chain = Op.getOperand(0); 3081 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3082 SDValue LHS = Op.getOperand(2); 3083 SDValue RHS = Op.getOperand(3); 3084 SDValue Dest = Op.getOperand(4); 3085 DebugLoc dl = Op.getDebugLoc(); 3086 3087 if (LHS.getValueType() == MVT::i32) { 3088 SDValue ARMcc; 3089 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3090 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3091 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3092 Chain, Dest, ARMcc, CCR, Cmp); 3093 } 3094 3095 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 3096 3097 if (getTargetMachine().Options.UnsafeFPMath && 3098 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 3099 CC == ISD::SETNE || CC == ISD::SETUNE)) { 3100 SDValue Result = OptimizeVFPBrcond(Op, DAG); 3101 if (Result.getNode()) 3102 return Result; 3103 } 3104 3105 ARMCC::CondCodes CondCode, CondCode2; 3106 FPCCToARMCC(CC, CondCode, CondCode2); 3107 3108 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3109 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3110 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3111 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3112 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 3113 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3114 if (CondCode2 != ARMCC::AL) { 3115 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 3116 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 3117 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3118 } 3119 return Res; 3120} 3121 3122SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 3123 SDValue Chain = Op.getOperand(0); 3124 SDValue Table = Op.getOperand(1); 3125 SDValue Index = Op.getOperand(2); 3126 DebugLoc dl = Op.getDebugLoc(); 3127 3128 EVT PTy = getPointerTy(); 3129 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 3130 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3131 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 3132 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3133 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 3134 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, 
PTy)); 3135 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3136 if (Subtarget->isThumb2()) { 3137 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 3138 // which does another jump to the destination. This also makes it easier 3139 // to translate it to TBB / TBH later. 3140 // FIXME: This might not work if the function is extremely large. 3141 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3142 Addr, Op.getOperand(2), JTI, UId); 3143 } 3144 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3145 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3146 MachinePointerInfo::getJumpTable(), 3147 false, false, false, 0); 3148 Chain = Addr.getValue(1); 3149 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3150 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3151 } else { 3152 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3153 MachinePointerInfo::getJumpTable(), 3154 false, false, false, 0); 3155 Chain = Addr.getValue(1); 3156 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3157 } 3158} 3159 3160static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3161 EVT VT = Op.getValueType(); 3162 DebugLoc dl = Op.getDebugLoc(); 3163 3164 if (Op.getValueType().getVectorElementType() == MVT::i32) { 3165 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 3166 return Op; 3167 return DAG.UnrollVectorOp(Op.getNode()); 3168 } 3169 3170 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 3171 "Invalid type for custom lowering!"); 3172 if (VT != MVT::v4i16) 3173 return DAG.UnrollVectorOp(Op.getNode()); 3174 3175 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 3176 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 3177} 3178 3179static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3180 EVT VT = Op.getValueType(); 3181 if (VT.isVector()) 3182 return LowerVectorFP_TO_INT(Op, DAG); 3183 3184 DebugLoc dl = Op.getDebugLoc(); 3185 unsigned Opc; 3186 3187 switch (Op.getOpcode()) { 3188 default: llvm_unreachable("Invalid opcode!"); 3189 case ISD::FP_TO_SINT: 3190 Opc = ARMISD::FTOSI; 3191 break; 3192 case ISD::FP_TO_UINT: 3193 Opc = ARMISD::FTOUI; 3194 break; 3195 } 3196 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3197 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3198} 3199 3200static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3201 EVT VT = Op.getValueType(); 3202 DebugLoc dl = Op.getDebugLoc(); 3203 3204 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 3205 if (VT.getVectorElementType() == MVT::f32) 3206 return Op; 3207 return DAG.UnrollVectorOp(Op.getNode()); 3208 } 3209 3210 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3211 "Invalid type for custom lowering!"); 3212 if (VT != MVT::v4f32) 3213 return DAG.UnrollVectorOp(Op.getNode()); 3214 3215 unsigned CastOpc; 3216 unsigned Opc; 3217 switch (Op.getOpcode()) { 3218 default: llvm_unreachable("Invalid opcode!"); 3219 case ISD::SINT_TO_FP: 3220 CastOpc = ISD::SIGN_EXTEND; 3221 Opc = ISD::SINT_TO_FP; 3222 break; 3223 case ISD::UINT_TO_FP: 3224 CastOpc = ISD::ZERO_EXTEND; 3225 Opc = ISD::UINT_TO_FP; 3226 break; 3227 } 3228 3229 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3230 return DAG.getNode(Opc, dl, VT, Op); 3231} 3232 3233static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3234 EVT VT = Op.getValueType(); 3235 if (VT.isVector()) 3236 return LowerVectorINT_TO_FP(Op, DAG); 3237 3238 DebugLoc dl = 
Op.getDebugLoc(); 3239 unsigned Opc; 3240 3241 switch (Op.getOpcode()) { 3242 default: llvm_unreachable("Invalid opcode!"); 3243 case ISD::SINT_TO_FP: 3244 Opc = ARMISD::SITOF; 3245 break; 3246 case ISD::UINT_TO_FP: 3247 Opc = ARMISD::UITOF; 3248 break; 3249 } 3250 3251 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3252 return DAG.getNode(Opc, dl, VT, Op); 3253} 3254 3255SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3256 // Implement fcopysign with a fabs and a conditional fneg. 3257 SDValue Tmp0 = Op.getOperand(0); 3258 SDValue Tmp1 = Op.getOperand(1); 3259 DebugLoc dl = Op.getDebugLoc(); 3260 EVT VT = Op.getValueType(); 3261 EVT SrcVT = Tmp1.getValueType(); 3262 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3263 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3264 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3265 3266 if (UseNEON) { 3267 // Use VBSL to copy the sign bit. 3268 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3269 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3270 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3271 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3272 if (VT == MVT::f64) 3273 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3274 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3275 DAG.getConstant(32, MVT::i32)); 3276 else /*if (VT == MVT::f32)*/ 3277 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3278 if (SrcVT == MVT::f32) { 3279 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3280 if (VT == MVT::f64) 3281 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3282 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3283 DAG.getConstant(32, MVT::i32)); 3284 } else if (VT == MVT::f32) 3285 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3286 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3287 DAG.getConstant(32, MVT::i32)); 3288 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3289 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3290 3291 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3292 MVT::i32); 3293 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3294 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3295 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3296 3297 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3298 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3299 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3300 if (VT == MVT::f32) { 3301 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3302 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3303 DAG.getConstant(0, MVT::i32)); 3304 } else { 3305 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3306 } 3307 3308 return Res; 3309 } 3310 3311 // Bitcast operand 1 to i32. 3312 if (SrcVT == MVT::f64) 3313 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3314 &Tmp1, 1).getValue(1); 3315 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3316 3317 // Or in the signbit with integer operations. 3318 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3319 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3320 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3321 if (VT == MVT::f32) { 3322 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3323 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3324 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3325 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3326 } 3327 3328 // f64: Or the high part with signbit and then combine two parts. 
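  // The sign of an f64 lives in bit 63, i.e. bit 31 of the high word, so only
  // the high word needs to be masked with 0x7fffffff and OR'd with the sign bit
  // already extracted into Tmp1 before VMOVDRR rebuilds the double.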
3329 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3330 &Tmp0, 1); 3331 SDValue Lo = Tmp0.getValue(0); 3332 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3333 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3334 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3335} 3336 3337SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3338 MachineFunction &MF = DAG.getMachineFunction(); 3339 MachineFrameInfo *MFI = MF.getFrameInfo(); 3340 MFI->setReturnAddressIsTaken(true); 3341 3342 EVT VT = Op.getValueType(); 3343 DebugLoc dl = Op.getDebugLoc(); 3344 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3345 if (Depth) { 3346 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3347 SDValue Offset = DAG.getConstant(4, MVT::i32); 3348 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3349 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3350 MachinePointerInfo(), false, false, false, 0); 3351 } 3352 3353 // Return LR, which contains the return address. Mark it an implicit live-in. 3354 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3355 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3356} 3357 3358SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3359 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3360 MFI->setFrameAddressIsTaken(true); 3361 3362 EVT VT = Op.getValueType(); 3363 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3364 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3365 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3366 ? ARM::R7 : ARM::R11; 3367 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3368 while (Depth--) 3369 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3370 MachinePointerInfo(), 3371 false, false, false, 0); 3372 return FrameAddr; 3373} 3374 3375/// ExpandBITCAST - If the target supports VFP, this function is called to 3376/// expand a bit convert where either the source or destination type is i64 to 3377/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3378/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3379/// vectors), since the legalizer won't know what to do with that. 3380static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3381 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3382 DebugLoc dl = N->getDebugLoc(); 3383 SDValue Op = N->getOperand(0); 3384 3385 // This function is only supposed to be called for i64 types, either as the 3386 // source or destination of the bit convert. 3387 EVT SrcVT = Op.getValueType(); 3388 EVT DstVT = N->getValueType(0); 3389 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3390 "ExpandBITCAST called for non-i64 type"); 3391 3392 // Turn i64->f64 into VMOVDRR. 3393 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3394 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3395 DAG.getConstant(0, MVT::i32)); 3396 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3397 DAG.getConstant(1, MVT::i32)); 3398 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3399 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3400 } 3401 3402 // Turn f64->i64 into VMOVRRD. 
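  // VMOVRRD yields the two 32-bit halves of the double (low word in result 0,
  // high word in result 1); BUILD_PAIR then reassembles them into the i64.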
3403 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3404 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3405 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3406 // Merge the pieces into a single i64 value. 3407 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3408 } 3409 3410 return SDValue(); 3411} 3412 3413/// getZeroVector - Returns a vector of specified type with all zero elements. 3414/// Zero vectors are used to represent vector negation and in those cases 3415/// will be implemented with the NEON VNEG instruction. However, VNEG does 3416/// not support i64 elements, so sometimes the zero vectors will need to be 3417/// explicitly constructed. Regardless, use a canonical VMOV to create the 3418/// zero vector. 3419static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3420 assert(VT.isVector() && "Expected a vector type"); 3421 // The canonical modified immediate encoding of a zero vector is....0! 3422 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3423 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3424 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3425 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3426} 3427 3428/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two 3429/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3430SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3431 SelectionDAG &DAG) const { 3432 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3433 EVT VT = Op.getValueType(); 3434 unsigned VTBits = VT.getSizeInBits(); 3435 DebugLoc dl = Op.getDebugLoc(); 3436 SDValue ShOpLo = Op.getOperand(0); 3437 SDValue ShOpHi = Op.getOperand(1); 3438 SDValue ShAmt = Op.getOperand(2); 3439 SDValue ARMcc; 3440 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3441 3442 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3443 3444 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3445 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3446 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3447 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3448 DAG.getConstant(VTBits, MVT::i32)); 3449 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3450 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3451 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3452 3453 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3454 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3455 ARMcc, DAG, dl); 3456 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3457 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3458 CCR, Cmp); 3459 3460 SDValue Ops[2] = { Lo, Hi }; 3461 return DAG.getMergeValues(Ops, 2, dl); 3462} 3463 3464/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3465/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
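/// Conceptually, for a shift amount n:
///   n <  32:  Hi = (ShOpHi << n) | (ShOpLo >> (32 - n)),  Lo = ShOpLo << n
///   n >= 32:  Hi = ShOpLo << (n - 32),                     Lo = 0
/// The CMOV on (n - 32) >= 0 selects between the two Hi expressions; Lo is a
/// single 32-bit shift, which already yields 0 on ARM once the register shift
/// amount is 32 or more.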
3466SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3467 SelectionDAG &DAG) const { 3468 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3469 EVT VT = Op.getValueType(); 3470 unsigned VTBits = VT.getSizeInBits(); 3471 DebugLoc dl = Op.getDebugLoc(); 3472 SDValue ShOpLo = Op.getOperand(0); 3473 SDValue ShOpHi = Op.getOperand(1); 3474 SDValue ShAmt = Op.getOperand(2); 3475 SDValue ARMcc; 3476 3477 assert(Op.getOpcode() == ISD::SHL_PARTS); 3478 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3479 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3480 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3481 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3482 DAG.getConstant(VTBits, MVT::i32)); 3483 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3484 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3485 3486 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3487 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3488 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3489 ARMcc, DAG, dl); 3490 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3491 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3492 CCR, Cmp); 3493 3494 SDValue Ops[2] = { Lo, Hi }; 3495 return DAG.getMergeValues(Ops, 2, dl); 3496} 3497 3498SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3499 SelectionDAG &DAG) const { 3500 // The rounding mode is in bits 23:22 of the FPSCR. 3501 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3502 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3) 3503 // so that the shift + and get folded into a bitfield extract. 3504 DebugLoc dl = Op.getDebugLoc(); 3505 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3506 DAG.getConstant(Intrinsic::arm_get_fpscr, 3507 MVT::i32)); 3508 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3509 DAG.getConstant(1U << 22, MVT::i32)); 3510 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3511 DAG.getConstant(22, MVT::i32)); 3512 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3513 DAG.getConstant(3, MVT::i32)); 3514} 3515 3516static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3517 const ARMSubtarget *ST) { 3518 EVT VT = N->getValueType(0); 3519 DebugLoc dl = N->getDebugLoc(); 3520 3521 if (!ST->hasV6T2Ops()) 3522 return SDValue(); 3523 3524 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3525 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3526} 3527 3528static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3529 const ARMSubtarget *ST) { 3530 EVT VT = N->getValueType(0); 3531 DebugLoc dl = N->getDebugLoc(); 3532 3533 if (!VT.isVector()) 3534 return SDValue(); 3535 3536 // Lower vector shifts on NEON to use VSHL. 3537 assert(ST->hasNEON() && "unexpected vector shift"); 3538 3539 // Left shifts translate directly to the vshiftu intrinsic. 3540 if (N->getOpcode() == ISD::SHL) 3541 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3542 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3543 N->getOperand(0), N->getOperand(1)); 3544 3545 assert((N->getOpcode() == ISD::SRA || 3546 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3547 3548 // NEON uses the same intrinsics for both left and right shifts. For 3549 // right shifts, the shift amounts are negative, so negate the vector of 3550 // shift amounts.
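  // For example, an SRL of a v4i32 by <1, 2, 3, 4> is emitted as vshiftu with
  // the counts <-1, -2, -3, -4>, built below as (0 - shift amounts).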
3551 EVT ShiftVT = N->getOperand(1).getValueType(); 3552 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3553 getZeroVector(ShiftVT, DAG, dl), 3554 N->getOperand(1)); 3555 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3556 Intrinsic::arm_neon_vshifts : 3557 Intrinsic::arm_neon_vshiftu); 3558 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3559 DAG.getConstant(vshiftInt, MVT::i32), 3560 N->getOperand(0), NegatedCount); 3561} 3562 3563static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3564 const ARMSubtarget *ST) { 3565 EVT VT = N->getValueType(0); 3566 DebugLoc dl = N->getDebugLoc(); 3567 3568 // We can get here for a node like i32 = ISD::SHL i32, i64 3569 if (VT != MVT::i64) 3570 return SDValue(); 3571 3572 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3573 "Unknown shift to lower!"); 3574 3575 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3576 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3577 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3578 return SDValue(); 3579 3580 // If we are in thumb mode, we don't have RRX. 3581 if (ST->isThumb1Only()) return SDValue(); 3582 3583 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3584 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3585 DAG.getConstant(0, MVT::i32)); 3586 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3587 DAG.getConstant(1, MVT::i32)); 3588 3589 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3590 // captures the result into a carry flag. 3591 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3592 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3593 3594 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3595 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3596 3597 // Merge the pieces into a single i64 value. 3598 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3599} 3600 3601static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3602 SDValue TmpOp0, TmpOp1; 3603 bool Invert = false; 3604 bool Swap = false; 3605 unsigned Opc = 0; 3606 3607 SDValue Op0 = Op.getOperand(0); 3608 SDValue Op1 = Op.getOperand(1); 3609 SDValue CC = Op.getOperand(2); 3610 EVT VT = Op.getValueType(); 3611 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3612 DebugLoc dl = Op.getDebugLoc(); 3613 3614 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3615 switch (SetCCOpcode) { 3616 default: llvm_unreachable("Illegal FP comparison"); 3617 case ISD::SETUNE: 3618 case ISD::SETNE: Invert = true; // Fallthrough 3619 case ISD::SETOEQ: 3620 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3621 case ISD::SETOLT: 3622 case ISD::SETLT: Swap = true; // Fallthrough 3623 case ISD::SETOGT: 3624 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3625 case ISD::SETOLE: 3626 case ISD::SETLE: Swap = true; // Fallthrough 3627 case ISD::SETOGE: 3628 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3629 case ISD::SETUGE: Swap = true; // Fallthrough 3630 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3631 case ISD::SETUGT: Swap = true; // Fallthrough 3632 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3633 case ISD::SETUEQ: Invert = true; // Fallthrough 3634 case ISD::SETONE: 3635 // Expand this to (OLT | OGT). 
3636 TmpOp0 = Op0; 3637 TmpOp1 = Op1; 3638 Opc = ISD::OR; 3639 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3640 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3641 break; 3642 case ISD::SETUO: Invert = true; // Fallthrough 3643 case ISD::SETO: 3644 // Expand this to (OLT | OGE). 3645 TmpOp0 = Op0; 3646 TmpOp1 = Op1; 3647 Opc = ISD::OR; 3648 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3649 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3650 break; 3651 } 3652 } else { 3653 // Integer comparisons. 3654 switch (SetCCOpcode) { 3655 default: llvm_unreachable("Illegal integer comparison"); 3656 case ISD::SETNE: Invert = true; 3657 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3658 case ISD::SETLT: Swap = true; 3659 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3660 case ISD::SETLE: Swap = true; 3661 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3662 case ISD::SETULT: Swap = true; 3663 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3664 case ISD::SETULE: Swap = true; 3665 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3666 } 3667 3668 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3669 if (Opc == ARMISD::VCEQ) { 3670 3671 SDValue AndOp; 3672 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3673 AndOp = Op0; 3674 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3675 AndOp = Op1; 3676 3677 // Ignore bitconvert. 3678 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3679 AndOp = AndOp.getOperand(0); 3680 3681 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3682 Opc = ARMISD::VTST; 3683 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3684 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3685 Invert = !Invert; 3686 } 3687 } 3688 } 3689 3690 if (Swap) 3691 std::swap(Op0, Op1); 3692 3693 // If one of the operands is a constant vector zero, attempt to fold the 3694 // comparison to a specialized compare-against-zero form. 3695 SDValue SingleOp; 3696 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3697 SingleOp = Op0; 3698 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3699 if (Opc == ARMISD::VCGE) 3700 Opc = ARMISD::VCLEZ; 3701 else if (Opc == ARMISD::VCGT) 3702 Opc = ARMISD::VCLTZ; 3703 SingleOp = Op1; 3704 } 3705 3706 SDValue Result; 3707 if (SingleOp.getNode()) { 3708 switch (Opc) { 3709 case ARMISD::VCEQ: 3710 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3711 case ARMISD::VCGE: 3712 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3713 case ARMISD::VCLEZ: 3714 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3715 case ARMISD::VCGT: 3716 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3717 case ARMISD::VCLTZ: 3718 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3719 default: 3720 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3721 } 3722 } else { 3723 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3724 } 3725 3726 if (Invert) 3727 Result = DAG.getNOT(dl, Result, VT); 3728 3729 return Result; 3730} 3731 3732/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3733/// valid vector constant for a NEON instruction with a "modified immediate" 3734/// operand (e.g., VMOV). If so, return the encoded value. 
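/// For example, a 32-bit splat of 0x00005600 is encodable: only byte 1 is
/// nonzero, so the "Value = 0x0000nn00" case below applies with Imm = 0x56.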
3735static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3736 unsigned SplatBitSize, SelectionDAG &DAG, 3737 EVT &VT, bool is128Bits, NEONModImmType type) { 3738 unsigned OpCmode, Imm; 3739 3740 // SplatBitSize is set to the smallest size that splats the vector, so a 3741 // zero vector will always have SplatBitSize == 8. However, NEON modified 3742 // immediate instructions others than VMOV do not support the 8-bit encoding 3743 // of a zero vector, and the default encoding of zero is supposed to be the 3744 // 32-bit version. 3745 if (SplatBits == 0) 3746 SplatBitSize = 32; 3747 3748 switch (SplatBitSize) { 3749 case 8: 3750 if (type != VMOVModImm) 3751 return SDValue(); 3752 // Any 1-byte value is OK. Op=0, Cmode=1110. 3753 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3754 OpCmode = 0xe; 3755 Imm = SplatBits; 3756 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3757 break; 3758 3759 case 16: 3760 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3761 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3762 if ((SplatBits & ~0xff) == 0) { 3763 // Value = 0x00nn: Op=x, Cmode=100x. 3764 OpCmode = 0x8; 3765 Imm = SplatBits; 3766 break; 3767 } 3768 if ((SplatBits & ~0xff00) == 0) { 3769 // Value = 0xnn00: Op=x, Cmode=101x. 3770 OpCmode = 0xa; 3771 Imm = SplatBits >> 8; 3772 break; 3773 } 3774 return SDValue(); 3775 3776 case 32: 3777 // NEON's 32-bit VMOV supports splat values where: 3778 // * only one byte is nonzero, or 3779 // * the least significant byte is 0xff and the second byte is nonzero, or 3780 // * the least significant 2 bytes are 0xff and the third is nonzero. 3781 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3782 if ((SplatBits & ~0xff) == 0) { 3783 // Value = 0x000000nn: Op=x, Cmode=000x. 3784 OpCmode = 0; 3785 Imm = SplatBits; 3786 break; 3787 } 3788 if ((SplatBits & ~0xff00) == 0) { 3789 // Value = 0x0000nn00: Op=x, Cmode=001x. 3790 OpCmode = 0x2; 3791 Imm = SplatBits >> 8; 3792 break; 3793 } 3794 if ((SplatBits & ~0xff0000) == 0) { 3795 // Value = 0x00nn0000: Op=x, Cmode=010x. 3796 OpCmode = 0x4; 3797 Imm = SplatBits >> 16; 3798 break; 3799 } 3800 if ((SplatBits & ~0xff000000) == 0) { 3801 // Value = 0xnn000000: Op=x, Cmode=011x. 3802 OpCmode = 0x6; 3803 Imm = SplatBits >> 24; 3804 break; 3805 } 3806 3807 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3808 if (type == OtherModImm) return SDValue(); 3809 3810 if ((SplatBits & ~0xffff) == 0 && 3811 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3812 // Value = 0x0000nnff: Op=x, Cmode=1100. 3813 OpCmode = 0xc; 3814 Imm = SplatBits >> 8; 3815 SplatBits |= 0xff; 3816 break; 3817 } 3818 3819 if ((SplatBits & ~0xffffff) == 0 && 3820 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3821 // Value = 0x00nnffff: Op=x, Cmode=1101. 3822 OpCmode = 0xd; 3823 Imm = SplatBits >> 16; 3824 SplatBits |= 0xffff; 3825 break; 3826 } 3827 3828 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3829 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3830 // VMOV.I32. A (very) minor optimization would be to replicate the value 3831 // and fall through here to test for a valid 64-bit splat. But, then the 3832 // caller would also need to check and handle the change in size. 3833 return SDValue(); 3834 3835 case 64: { 3836 if (type != VMOVModImm) 3837 return SDValue(); 3838 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
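    // For example, 0x00ff00ff00ff00ff is encodable: bytes 0, 2, 4 and 6 are
    // 0xff, so the loop below builds the 8-bit immediate 0b01010101 (0x55).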
3839 uint64_t BitMask = 0xff; 3840 uint64_t Val = 0; 3841 unsigned ImmMask = 1; 3842 Imm = 0; 3843 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3844 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3845 Val |= BitMask; 3846 Imm |= ImmMask; 3847 } else if ((SplatBits & BitMask) != 0) { 3848 return SDValue(); 3849 } 3850 BitMask <<= 8; 3851 ImmMask <<= 1; 3852 } 3853 // Op=1, Cmode=1110. 3854 OpCmode = 0x1e; 3855 SplatBits = Val; 3856 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3857 break; 3858 } 3859 3860 default: 3861 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3862 } 3863 3864 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3865 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3866} 3867 3868SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 3869 const ARMSubtarget *ST) const { 3870 if (!ST->useNEONForSinglePrecisionFP() || !ST->hasVFP3() || ST->hasD16()) 3871 return SDValue(); 3872 3873 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 3874 assert(Op.getValueType() == MVT::f32 && 3875 "ConstantFP custom lowering should only occur for f32."); 3876 3877 // Try splatting with a VMOV.f32... 3878 APFloat FPVal = CFP->getValueAPF(); 3879 int ImmVal = ARM_AM::getFP32Imm(FPVal); 3880 if (ImmVal != -1) { 3881 DebugLoc DL = Op.getDebugLoc(); 3882 SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32); 3883 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 3884 NewVal); 3885 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 3886 DAG.getConstant(0, MVT::i32)); 3887 } 3888 3889 // If that fails, try a VMOV.i32 3890 EVT VMovVT; 3891 unsigned iVal = FPVal.bitcastToAPInt().getZExtValue(); 3892 SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false, 3893 VMOVModImm); 3894 if (NewVal != SDValue()) { 3895 DebugLoc DL = Op.getDebugLoc(); 3896 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 3897 NewVal); 3898 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 3899 VecConstant); 3900 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 3901 DAG.getConstant(0, MVT::i32)); 3902 } 3903 3904 // Finally, try a VMVN.i32 3905 NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false, 3906 VMVNModImm); 3907 if (NewVal != SDValue()) { 3908 DebugLoc DL = Op.getDebugLoc(); 3909 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 3910 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 3911 VecConstant); 3912 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 3913 DAG.getConstant(0, MVT::i32)); 3914 } 3915 3916 return SDValue(); 3917} 3918 3919 3920static bool isVEXTMask(ArrayRef<int> M, EVT VT, 3921 bool &ReverseVEXT, unsigned &Imm) { 3922 unsigned NumElts = VT.getVectorNumElements(); 3923 ReverseVEXT = false; 3924 3925 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3926 if (M[0] < 0) 3927 return false; 3928 3929 Imm = M[0]; 3930 3931 // If this is a VEXT shuffle, the immediate value is the index of the first 3932 // element. The other shuffle indices must be the successive elements after 3933 // the first one. 3934 unsigned ExpectedElt = Imm; 3935 for (unsigned i = 1; i < NumElts; ++i) { 3936 // Increment the expected index. If it wraps around, it may still be 3937 // a VEXT but the source vectors must be swapped. 
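    // e.g. for v4i16, <3, 4, 5, 6> is a VEXT of (op0, op1) with Imm = 3, while
    // <7, 0, 1, 2> wraps around and becomes a VEXT of the swapped operands,
    // with Imm adjusted back down to 3 at the end of the function.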
3938 ExpectedElt += 1; 3939 if (ExpectedElt == NumElts * 2) { 3940 ExpectedElt = 0; 3941 ReverseVEXT = true; 3942 } 3943 3944 if (M[i] < 0) continue; // ignore UNDEF indices 3945 if (ExpectedElt != static_cast<unsigned>(M[i])) 3946 return false; 3947 } 3948 3949 // Adjust the index value if the source operands will be swapped. 3950 if (ReverseVEXT) 3951 Imm -= NumElts; 3952 3953 return true; 3954} 3955 3956/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3957/// instruction with the specified blocksize. (The order of the elements 3958/// within each block of the vector is reversed.) 3959static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 3960 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3961 "Only possible block sizes for VREV are: 16, 32, 64"); 3962 3963 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3964 if (EltSz == 64) 3965 return false; 3966 3967 unsigned NumElts = VT.getVectorNumElements(); 3968 unsigned BlockElts = M[0] + 1; 3969 // If the first shuffle index is UNDEF, be optimistic. 3970 if (M[0] < 0) 3971 BlockElts = BlockSize / EltSz; 3972 3973 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3974 return false; 3975 3976 for (unsigned i = 0; i < NumElts; ++i) { 3977 if (M[i] < 0) continue; // ignore UNDEF indices 3978 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3979 return false; 3980 } 3981 3982 return true; 3983} 3984 3985static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 3986 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3987 // range, then 0 is placed into the resulting vector. So pretty much any mask 3988 // of 8 elements can work here. 3989 return VT == MVT::v8i8 && M.size() == 8; 3990} 3991 3992static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 3993 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3994 if (EltSz == 64) 3995 return false; 3996 3997 unsigned NumElts = VT.getVectorNumElements(); 3998 WhichResult = (M[0] == 0 ? 0 : 1); 3999 for (unsigned i = 0; i < NumElts; i += 2) { 4000 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4001 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 4002 return false; 4003 } 4004 return true; 4005} 4006 4007/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 4008/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4009/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 4010static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4011 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4012 if (EltSz == 64) 4013 return false; 4014 4015 unsigned NumElts = VT.getVectorNumElements(); 4016 WhichResult = (M[0] == 0 ? 0 : 1); 4017 for (unsigned i = 0; i < NumElts; i += 2) { 4018 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4019 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 4020 return false; 4021 } 4022 return true; 4023} 4024 4025static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4026 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4027 if (EltSz == 64) 4028 return false; 4029 4030 unsigned NumElts = VT.getVectorNumElements(); 4031 WhichResult = (M[0] == 0 ? 0 : 1); 4032 for (unsigned i = 0; i != NumElts; ++i) { 4033 if (M[i] < 0) continue; // ignore UNDEF indices 4034 if ((unsigned) M[i] != 2 * i + WhichResult) 4035 return false; 4036 } 4037 4038 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
4039 if (VT.is64BitVector() && EltSz == 32) 4040 return false; 4041 4042 return true; 4043} 4044 4045/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 4046/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4047/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 4048static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4049 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4050 if (EltSz == 64) 4051 return false; 4052 4053 unsigned Half = VT.getVectorNumElements() / 2; 4054 WhichResult = (M[0] == 0 ? 0 : 1); 4055 for (unsigned j = 0; j != 2; ++j) { 4056 unsigned Idx = WhichResult; 4057 for (unsigned i = 0; i != Half; ++i) { 4058 int MIdx = M[i + j * Half]; 4059 if (MIdx >= 0 && (unsigned) MIdx != Idx) 4060 return false; 4061 Idx += 2; 4062 } 4063 } 4064 4065 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4066 if (VT.is64BitVector() && EltSz == 32) 4067 return false; 4068 4069 return true; 4070} 4071 4072static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4073 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4074 if (EltSz == 64) 4075 return false; 4076 4077 unsigned NumElts = VT.getVectorNumElements(); 4078 WhichResult = (M[0] == 0 ? 0 : 1); 4079 unsigned Idx = WhichResult * NumElts / 2; 4080 for (unsigned i = 0; i != NumElts; i += 2) { 4081 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4082 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 4083 return false; 4084 Idx += 1; 4085 } 4086 4087 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4088 if (VT.is64BitVector() && EltSz == 32) 4089 return false; 4090 4091 return true; 4092} 4093 4094/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 4095/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4096/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 4097static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4098 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4099 if (EltSz == 64) 4100 return false; 4101 4102 unsigned NumElts = VT.getVectorNumElements(); 4103 WhichResult = (M[0] == 0 ? 0 : 1); 4104 unsigned Idx = WhichResult * NumElts / 2; 4105 for (unsigned i = 0; i != NumElts; i += 2) { 4106 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4107 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 4108 return false; 4109 Idx += 1; 4110 } 4111 4112 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4113 if (VT.is64BitVector() && EltSz == 32) 4114 return false; 4115 4116 return true; 4117} 4118 4119// If N is an integer constant that can be moved into a register in one 4120// instruction, return an SDValue of such a constant (will become a MOV 4121// instruction). Otherwise return null. 4122static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 4123 const ARMSubtarget *ST, DebugLoc dl) { 4124 uint64_t Val; 4125 if (!isa<ConstantSDNode>(N)) 4126 return SDValue(); 4127 Val = cast<ConstantSDNode>(N)->getZExtValue(); 4128 4129 if (ST->isThumb1Only()) { 4130 if (Val <= 255 || ~Val <= 255) 4131 return DAG.getConstant(Val, MVT::i32); 4132 } else { 4133 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 4134 return DAG.getConstant(Val, MVT::i32); 4135 } 4136 return SDValue(); 4137} 4138 4139// If this is a case we can't handle, return null and let the default 4140// expansion code take care of it. 
4141SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 4142 const ARMSubtarget *ST) const { 4143 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 4144 DebugLoc dl = Op.getDebugLoc(); 4145 EVT VT = Op.getValueType(); 4146 4147 APInt SplatBits, SplatUndef; 4148 unsigned SplatBitSize; 4149 bool HasAnyUndefs; 4150 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4151 if (SplatBitSize <= 64) { 4152 // Check if an immediate VMOV works. 4153 EVT VmovVT; 4154 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4155 SplatUndef.getZExtValue(), SplatBitSize, 4156 DAG, VmovVT, VT.is128BitVector(), 4157 VMOVModImm); 4158 if (Val.getNode()) { 4159 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 4160 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4161 } 4162 4163 // Try an immediate VMVN. 4164 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 4165 Val = isNEONModifiedImm(NegatedImm, 4166 SplatUndef.getZExtValue(), SplatBitSize, 4167 DAG, VmovVT, VT.is128BitVector(), 4168 VMVNModImm); 4169 if (Val.getNode()) { 4170 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 4171 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4172 } 4173 4174 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 4175 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 4176 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 4177 if (ImmVal != -1) { 4178 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 4179 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 4180 } 4181 } 4182 } 4183 } 4184 4185 // Scan through the operands to see if only one value is used. 4186 // 4187 // As an optimisation, even if more than one value is used it may be more 4188 // profitable to splat with one value then change some lanes. 4189 // 4190 // Heuristically we decide to do this if the vector has a "dominant" value, 4191 // defined as splatted to more than half of the lanes. 4192 unsigned NumElts = VT.getVectorNumElements(); 4193 bool isOnlyLowElement = true; 4194 bool usesOnlyOneValue = true; 4195 bool hasDominantValue = false; 4196 bool isConstant = true; 4197 4198 // Map of the number of times a particular SDValue appears in the 4199 // element list. 4200 DenseMap<SDValue, unsigned> ValueCounts; 4201 SDValue Value; 4202 for (unsigned i = 0; i < NumElts; ++i) { 4203 SDValue V = Op.getOperand(i); 4204 if (V.getOpcode() == ISD::UNDEF) 4205 continue; 4206 if (i > 0) 4207 isOnlyLowElement = false; 4208 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4209 isConstant = false; 4210 4211 ValueCounts.insert(std::make_pair(V, 0)); 4212 unsigned &Count = ValueCounts[V]; 4213 4214 // Is this value dominant? (takes up more than half of the lanes) 4215 if (++Count > (NumElts / 2)) { 4216 hasDominantValue = true; 4217 Value = V; 4218 } 4219 } 4220 if (ValueCounts.size() != 1) 4221 usesOnlyOneValue = false; 4222 if (!Value.getNode() && ValueCounts.size() > 0) 4223 Value = ValueCounts.begin()->first; 4224 4225 if (ValueCounts.size() == 0) 4226 return DAG.getUNDEF(VT); 4227 4228 if (isOnlyLowElement) 4229 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 4230 4231 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4232 4233 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 4234 // i32 and try again. 
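  // (The floating-point case below rebuilds the vector as the equivalent
  // integer BUILD_VECTOR, with each element bitcast to its i32 image, and
  // re-runs this lowering on that form.)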
4235 if (hasDominantValue && EltSize <= 32) { 4236 if (!isConstant) { 4237 SDValue N; 4238 4239 // If we are VDUPing a value that comes directly from a vector, that will 4240 // cause an unnecessary move to and from a GPR, where instead we could 4241 // just use VDUPLANE. 4242 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 4243 // We need to create a new undef vector to use for the VDUPLANE if the 4244 // size of the vector from which we get the value is different than the 4245 // size of the vector that we need to create. We will insert the element 4246 // such that the register coalescer will remove unnecessary copies. 4247 if (VT != Value->getOperand(0).getValueType()) { 4248 ConstantSDNode *constIndex; 4249 constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)); 4250 assert(constIndex && "The index is not a constant!"); 4251 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 4252 VT.getVectorNumElements(); 4253 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4254 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 4255 Value, DAG.getConstant(index, MVT::i32)), 4256 DAG.getConstant(index, MVT::i32)); 4257 } else { 4258 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4259 Value->getOperand(0), Value->getOperand(1)); 4260 } 4261 } 4262 else 4263 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 4264 4265 if (!usesOnlyOneValue) { 4266 // The dominant value was splatted as 'N', but we now have to insert 4267 // all differing elements. 4268 for (unsigned I = 0; I < NumElts; ++I) { 4269 if (Op.getOperand(I) == Value) 4270 continue; 4271 SmallVector<SDValue, 3> Ops; 4272 Ops.push_back(N); 4273 Ops.push_back(Op.getOperand(I)); 4274 Ops.push_back(DAG.getConstant(I, MVT::i32)); 4275 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3); 4276 } 4277 } 4278 return N; 4279 } 4280 if (VT.getVectorElementType().isFloatingPoint()) { 4281 SmallVector<SDValue, 8> Ops; 4282 for (unsigned i = 0; i < NumElts; ++i) 4283 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4284 Op.getOperand(i))); 4285 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4286 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4287 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4288 if (Val.getNode()) 4289 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4290 } 4291 if (usesOnlyOneValue) { 4292 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4293 if (isConstant && Val.getNode()) 4294 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4295 } 4296 } 4297 4298 // If all elements are constants and the case above didn't get hit, fall back 4299 // to the default expansion, which will generate a load from the constant 4300 // pool. 4301 if (isConstant) 4302 return SDValue(); 4303 4304 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4305 if (NumElts >= 4) { 4306 SDValue shuffle = ReconstructShuffle(Op, DAG); 4307 if (shuffle != SDValue()) 4308 return shuffle; 4309 } 4310 4311 // Vectors with 32- or 64-bit elements can be built by directly assigning 4312 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4313 // will be legalized. 4314 if (EltSize >= 32) { 4315 // Do the expansion with floating-point types, since that is what the VFP 4316 // registers are defined to use, and since i64 is not legal. 
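    // For example, a v2i64 BUILD_VECTOR is emitted as an ARMISD::BUILD_VECTOR
    // of two f64 operands (each element bitcast to f64), with the result
    // bitcast back to v2i64.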
4317 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4318 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4319 SmallVector<SDValue, 8> Ops; 4320 for (unsigned i = 0; i < NumElts; ++i) 4321 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4322 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4323 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4324 } 4325 4326 return SDValue(); 4327} 4328 4329// Gather data to see if the operation can be modelled as a 4330// shuffle in combination with VEXTs. 4331SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4332 SelectionDAG &DAG) const { 4333 DebugLoc dl = Op.getDebugLoc(); 4334 EVT VT = Op.getValueType(); 4335 unsigned NumElts = VT.getVectorNumElements(); 4336 4337 SmallVector<SDValue, 2> SourceVecs; 4338 SmallVector<unsigned, 2> MinElts; 4339 SmallVector<unsigned, 2> MaxElts; 4340 4341 for (unsigned i = 0; i < NumElts; ++i) { 4342 SDValue V = Op.getOperand(i); 4343 if (V.getOpcode() == ISD::UNDEF) 4344 continue; 4345 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4346 // A shuffle can only come from building a vector from various 4347 // elements of other vectors. 4348 return SDValue(); 4349 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4350 VT.getVectorElementType()) { 4351 // This code doesn't know how to handle shuffles where the vector 4352 // element types do not match (this happens because type legalization 4353 // promotes the return type of EXTRACT_VECTOR_ELT). 4354 // FIXME: It might be appropriate to extend this code to handle 4355 // mismatched types. 4356 return SDValue(); 4357 } 4358 4359 // Record this extraction against the appropriate vector if possible... 4360 SDValue SourceVec = V.getOperand(0); 4361 // If the element number isn't a constant, we can't effectively 4362 // analyze what's going on. 4363 if (!isa<ConstantSDNode>(V.getOperand(1))) 4364 return SDValue(); 4365 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4366 bool FoundSource = false; 4367 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4368 if (SourceVecs[j] == SourceVec) { 4369 if (MinElts[j] > EltNo) 4370 MinElts[j] = EltNo; 4371 if (MaxElts[j] < EltNo) 4372 MaxElts[j] = EltNo; 4373 FoundSource = true; 4374 break; 4375 } 4376 } 4377 4378 // Or record a new source if not... 4379 if (!FoundSource) { 4380 SourceVecs.push_back(SourceVec); 4381 MinElts.push_back(EltNo); 4382 MaxElts.push_back(EltNo); 4383 } 4384 } 4385 4386 // Currently only do something sane when at most two source vectors 4387 // involved. 4388 if (SourceVecs.size() > 2) 4389 return SDValue(); 4390 4391 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4392 int VEXTOffsets[2] = {0, 0}; 4393 4394 // This loop extracts the usage patterns of the source vectors 4395 // and prepares appropriate SDValues for a shuffle if possible. 4396 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4397 if (SourceVecs[i].getValueType() == VT) { 4398 // No VEXT necessary 4399 ShuffleSrcs[i] = SourceVecs[i]; 4400 VEXTOffsets[i] = 0; 4401 continue; 4402 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4403 // It probably isn't worth padding out a smaller vector just to 4404 // break it down again in a shuffle. 4405 return SDValue(); 4406 } 4407 4408 // Since only 64-bit and 128-bit vectors are legal on ARM and 4409 // we've eliminated the other cases... 
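    // ...so any remaining source must have exactly twice as many elements as
    // the result (e.g. a v4i16 result built from v8i16 sources), which is what
    // the assert below checks.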
4410 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4411 "unexpected vector sizes in ReconstructShuffle"); 4412 4413 if (MaxElts[i] - MinElts[i] >= NumElts) { 4414 // Span too large for a VEXT to cope 4415 return SDValue(); 4416 } 4417 4418 if (MinElts[i] >= NumElts) { 4419 // The extraction can just take the second half 4420 VEXTOffsets[i] = NumElts; 4421 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4422 SourceVecs[i], 4423 DAG.getIntPtrConstant(NumElts)); 4424 } else if (MaxElts[i] < NumElts) { 4425 // The extraction can just take the first half 4426 VEXTOffsets[i] = 0; 4427 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4428 SourceVecs[i], 4429 DAG.getIntPtrConstant(0)); 4430 } else { 4431 // An actual VEXT is needed 4432 VEXTOffsets[i] = MinElts[i]; 4433 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4434 SourceVecs[i], 4435 DAG.getIntPtrConstant(0)); 4436 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4437 SourceVecs[i], 4438 DAG.getIntPtrConstant(NumElts)); 4439 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4440 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4441 } 4442 } 4443 4444 SmallVector<int, 8> Mask; 4445 4446 for (unsigned i = 0; i < NumElts; ++i) { 4447 SDValue Entry = Op.getOperand(i); 4448 if (Entry.getOpcode() == ISD::UNDEF) { 4449 Mask.push_back(-1); 4450 continue; 4451 } 4452 4453 SDValue ExtractVec = Entry.getOperand(0); 4454 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4455 .getOperand(1))->getSExtValue(); 4456 if (ExtractVec == SourceVecs[0]) { 4457 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4458 } else { 4459 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4460 } 4461 } 4462 4463 // Final check before we try to produce nonsense... 4464 if (isShuffleMaskLegal(Mask, VT)) 4465 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4466 &Mask[0]); 4467 4468 return SDValue(); 4469} 4470 4471/// isShuffleMaskLegal - Targets can use this to indicate that they only 4472/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4473/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4474/// are assumed to be legal. 4475bool 4476ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4477 EVT VT) const { 4478 if (VT.getVectorNumElements() == 4 && 4479 (VT.is128BitVector() || VT.is64BitVector())) { 4480 unsigned PFIndexes[4]; 4481 for (unsigned i = 0; i != 4; ++i) { 4482 if (M[i] < 0) 4483 PFIndexes[i] = 8; 4484 else 4485 PFIndexes[i] = M[i]; 4486 } 4487 4488 // Compute the index in the perfect shuffle table. 
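    // Worked example (illustrative): the interleaving mask <0,4,1,5> has no
    // undef lanes (an undef lane would contribute the digit 8), so it encodes
    // as the base-9 index 0*9*9*9 + 4*9*9 + 1*9 + 5 = 338. Only the cost in
    // the top two bits of the table entry matters here; GeneratePerfectShuffle
    // below unpacks the rest of the entry when the shuffle is actually built.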
4489 unsigned PFTableIndex = 4490 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4491 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4492 unsigned Cost = (PFEntry >> 30); 4493 4494 if (Cost <= 4) 4495 return true; 4496 } 4497 4498 bool ReverseVEXT; 4499 unsigned Imm, WhichResult; 4500 4501 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4502 return (EltSize >= 32 || 4503 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4504 isVREVMask(M, VT, 64) || 4505 isVREVMask(M, VT, 32) || 4506 isVREVMask(M, VT, 16) || 4507 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4508 isVTBLMask(M, VT) || 4509 isVTRNMask(M, VT, WhichResult) || 4510 isVUZPMask(M, VT, WhichResult) || 4511 isVZIPMask(M, VT, WhichResult) || 4512 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4513 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4514 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4515} 4516 4517/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4518/// the specified operations to build the shuffle. 4519static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4520 SDValue RHS, SelectionDAG &DAG, 4521 DebugLoc dl) { 4522 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4523 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4524 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4525 4526 enum { 4527 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4528 OP_VREV, 4529 OP_VDUP0, 4530 OP_VDUP1, 4531 OP_VDUP2, 4532 OP_VDUP3, 4533 OP_VEXT1, 4534 OP_VEXT2, 4535 OP_VEXT3, 4536 OP_VUZPL, // VUZP, left result 4537 OP_VUZPR, // VUZP, right result 4538 OP_VZIPL, // VZIP, left result 4539 OP_VZIPR, // VZIP, right result 4540 OP_VTRNL, // VTRN, left result 4541 OP_VTRNR // VTRN, right result 4542 }; 4543 4544 if (OpNum == OP_COPY) { 4545 if (LHSID == (1*9+2)*9+3) return LHS; 4546 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4547 return RHS; 4548 } 4549 4550 SDValue OpLHS, OpRHS; 4551 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4552 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4553 EVT VT = OpLHS.getValueType(); 4554 4555 switch (OpNum) { 4556 default: llvm_unreachable("Unknown shuffle opcode!"); 4557 case OP_VREV: 4558 // VREV divides the vector in half and swaps within the half. 
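    // For example, lanes <0,1,2,3> of a 4-lane vector become <1,0,3,2>,
    // whichever of VREV64, VREV32 or VREV16 is chosen below for the element
    // size.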
4559 if (VT.getVectorElementType() == MVT::i32 ||
4560 VT.getVectorElementType() == MVT::f32)
4561 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
4562 // vrev <4 x i16> -> VREV32
4563 if (VT.getVectorElementType() == MVT::i16)
4564 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
4565 // vrev <4 x i8> -> VREV16
4566 assert(VT.getVectorElementType() == MVT::i8);
4567 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
4568 case OP_VDUP0:
4569 case OP_VDUP1:
4570 case OP_VDUP2:
4571 case OP_VDUP3:
4572 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
4573 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
4574 case OP_VEXT1:
4575 case OP_VEXT2:
4576 case OP_VEXT3:
4577 return DAG.getNode(ARMISD::VEXT, dl, VT,
4578 OpLHS, OpRHS,
4579 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
4580 case OP_VUZPL:
4581 case OP_VUZPR:
4582 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
4583 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
4584 case OP_VZIPL:
4585 case OP_VZIPR:
4586 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
4587 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
4588 case OP_VTRNL:
4589 case OP_VTRNR:
4590 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
4591 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
4592 }
4593}
4594
4595static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
4596 ArrayRef<int> ShuffleMask,
4597 SelectionDAG &DAG) {
4598 // Check to see if we can use the VTBL instruction.
4599 SDValue V1 = Op.getOperand(0);
4600 SDValue V2 = Op.getOperand(1);
4601 DebugLoc DL = Op.getDebugLoc();
4602
4603 SmallVector<SDValue, 8> VTBLMask;
4604 for (ArrayRef<int>::iterator
4605 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
4606 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));
4607
4608 if (V2.getNode()->getOpcode() == ISD::UNDEF)
4609 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
4610 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
4611 &VTBLMask[0], 8));
4612
4613 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
4614 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
4615 &VTBLMask[0], 8));
4616}
4617
4618static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
4619 SDValue V1 = Op.getOperand(0);
4620 SDValue V2 = Op.getOperand(1);
4621 DebugLoc dl = Op.getDebugLoc();
4622 EVT VT = Op.getValueType();
4623 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4624
4625 // Convert shuffles that are directly supported on NEON to target-specific
4626 // DAG nodes, instead of keeping them as shuffles and matching them again
4627 // during code selection. This is more efficient and avoids the possibility
4628 // of inconsistencies between legalization and selection.
4629 // FIXME: floating-point vectors should be canonicalized to integer vectors
4630 // of the same type so that they get CSEd properly.
4631 ArrayRef<int> ShuffleMask = SVN->getMask();
4632
4633 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4634 if (EltSize <= 32) {
4635 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
4636 int Lane = SVN->getSplatIndex();
4637 // If this is undef splat, generate it via "just" vdup, if possible.
4638 if (Lane == -1) Lane = 0;
4639
4640 // Test if V1 is a SCALAR_TO_VECTOR.
4641 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4642 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
4643 }
4644 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
4645 // (and probably will turn into a SCALAR_TO_VECTOR once legalization
4646 // reaches it).
4647 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 4648 !isa<ConstantSDNode>(V1.getOperand(0))) { 4649 bool IsScalarToVector = true; 4650 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 4651 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 4652 IsScalarToVector = false; 4653 break; 4654 } 4655 if (IsScalarToVector) 4656 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4657 } 4658 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4659 DAG.getConstant(Lane, MVT::i32)); 4660 } 4661 4662 bool ReverseVEXT; 4663 unsigned Imm; 4664 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4665 if (ReverseVEXT) 4666 std::swap(V1, V2); 4667 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4668 DAG.getConstant(Imm, MVT::i32)); 4669 } 4670 4671 if (isVREVMask(ShuffleMask, VT, 64)) 4672 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4673 if (isVREVMask(ShuffleMask, VT, 32)) 4674 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4675 if (isVREVMask(ShuffleMask, VT, 16)) 4676 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4677 4678 // Check for Neon shuffles that modify both input vectors in place. 4679 // If both results are used, i.e., if there are two shuffles with the same 4680 // source operands and with masks corresponding to both results of one of 4681 // these operations, DAG memoization will ensure that a single node is 4682 // used for both shuffles. 4683 unsigned WhichResult; 4684 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4685 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4686 V1, V2).getValue(WhichResult); 4687 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4688 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4689 V1, V2).getValue(WhichResult); 4690 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4691 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4692 V1, V2).getValue(WhichResult); 4693 4694 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4695 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4696 V1, V1).getValue(WhichResult); 4697 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4698 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4699 V1, V1).getValue(WhichResult); 4700 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4701 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4702 V1, V1).getValue(WhichResult); 4703 } 4704 4705 // If the shuffle is not directly supported and it has 4 elements, use 4706 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4707 unsigned NumElts = VT.getVectorNumElements(); 4708 if (NumElts == 4) { 4709 unsigned PFIndexes[4]; 4710 for (unsigned i = 0; i != 4; ++i) { 4711 if (ShuffleMask[i] < 0) 4712 PFIndexes[i] = 8; 4713 else 4714 PFIndexes[i] = ShuffleMask[i]; 4715 } 4716 4717 // Compute the index in the perfect shuffle table. 4718 unsigned PFTableIndex = 4719 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4720 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4721 unsigned Cost = (PFEntry >> 30); 4722 4723 if (Cost <= 4) 4724 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4725 } 4726 4727 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4728 if (EltSize >= 32) { 4729 // Do the expansion with floating-point types, since that is what the VFP 4730 // registers are defined to use, and since i64 is not legal. 
4731 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4732 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4733 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4734 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4735 SmallVector<SDValue, 8> Ops; 4736 for (unsigned i = 0; i < NumElts; ++i) { 4737 if (ShuffleMask[i] < 0) 4738 Ops.push_back(DAG.getUNDEF(EltVT)); 4739 else 4740 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4741 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4742 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4743 MVT::i32))); 4744 } 4745 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4746 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4747 } 4748 4749 if (VT == MVT::v8i8) { 4750 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4751 if (NewOp.getNode()) 4752 return NewOp; 4753 } 4754 4755 return SDValue(); 4756} 4757 4758static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4759 // INSERT_VECTOR_ELT is legal only for immediate indexes. 4760 SDValue Lane = Op.getOperand(2); 4761 if (!isa<ConstantSDNode>(Lane)) 4762 return SDValue(); 4763 4764 return Op; 4765} 4766 4767static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4768 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4769 SDValue Lane = Op.getOperand(1); 4770 if (!isa<ConstantSDNode>(Lane)) 4771 return SDValue(); 4772 4773 SDValue Vec = Op.getOperand(0); 4774 if (Op.getValueType() == MVT::i32 && 4775 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4776 DebugLoc dl = Op.getDebugLoc(); 4777 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4778 } 4779 4780 return Op; 4781} 4782 4783static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4784 // The only time a CONCAT_VECTORS operation can have legal types is when 4785 // two 64-bit vectors are concatenated to a 128-bit vector. 4786 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4787 "unexpected CONCAT_VECTORS"); 4788 DebugLoc dl = Op.getDebugLoc(); 4789 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4790 SDValue Op0 = Op.getOperand(0); 4791 SDValue Op1 = Op.getOperand(1); 4792 if (Op0.getOpcode() != ISD::UNDEF) 4793 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4794 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4795 DAG.getIntPtrConstant(0)); 4796 if (Op1.getOpcode() != ISD::UNDEF) 4797 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4798 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4799 DAG.getIntPtrConstant(1)); 4800 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4801} 4802 4803/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4804/// element has been zero/sign-extended, depending on the isSigned parameter, 4805/// from an integer type half its size. 4806static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4807 bool isSigned) { 4808 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4809 EVT VT = N->getValueType(0); 4810 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4811 SDNode *BVN = N->getOperand(0).getNode(); 4812 if (BVN->getValueType(0) != MVT::v4i32 || 4813 BVN->getOpcode() != ISD::BUILD_VECTOR) 4814 return false; 4815 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4816 unsigned HiElt = 1 - LoElt; 4817 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4818 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4819 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4820 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4821 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4822 return false; 4823 if (isSigned) { 4824 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4825 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4826 return true; 4827 } else { 4828 if (Hi0->isNullValue() && Hi1->isNullValue()) 4829 return true; 4830 } 4831 return false; 4832 } 4833 4834 if (N->getOpcode() != ISD::BUILD_VECTOR) 4835 return false; 4836 4837 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4838 SDNode *Elt = N->getOperand(i).getNode(); 4839 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4840 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4841 unsigned HalfSize = EltSize / 2; 4842 if (isSigned) { 4843 if (!isIntN(HalfSize, C->getSExtValue())) 4844 return false; 4845 } else { 4846 if (!isUIntN(HalfSize, C->getZExtValue())) 4847 return false; 4848 } 4849 continue; 4850 } 4851 return false; 4852 } 4853 4854 return true; 4855} 4856 4857/// isSignExtended - Check if a node is a vector value that is sign-extended 4858/// or a constant BUILD_VECTOR with sign-extended elements. 4859static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4860 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4861 return true; 4862 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4863 return true; 4864 return false; 4865} 4866 4867/// isZeroExtended - Check if a node is a vector value that is zero-extended 4868/// or a constant BUILD_VECTOR with zero-extended elements. 4869static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4870 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4871 return true; 4872 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4873 return true; 4874 return false; 4875} 4876 4877/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4878/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4879static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4880 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4881 return N->getOperand(0); 4882 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4883 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4884 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4885 LD->isNonTemporal(), LD->isInvariant(), 4886 LD->getAlignment()); 4887 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4888 // have been legalized as a BITCAST from v4i32. 4889 if (N->getOpcode() == ISD::BITCAST) { 4890 SDNode *BVN = N->getOperand(0).getNode(); 4891 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4892 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4893 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4894 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4895 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4896 } 4897 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
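  // For example (illustrative values): a v4i32 constant vector <1, 2, 3, 4>
  // whose elements were classified as extended from i16 is rebuilt here as a
  // v4i16 build_vector <1, 2, 3, 4>, which VMULL formation can then use
  // directly.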
4898 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4899 EVT VT = N->getValueType(0); 4900 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4901 unsigned NumElts = VT.getVectorNumElements(); 4902 MVT TruncVT = MVT::getIntegerVT(EltSize); 4903 SmallVector<SDValue, 8> Ops; 4904 for (unsigned i = 0; i != NumElts; ++i) { 4905 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4906 const APInt &CInt = C->getAPIntValue(); 4907 // Element types smaller than 32 bits are not legal, so use i32 elements. 4908 // The values are implicitly truncated so sext vs. zext doesn't matter. 4909 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32)); 4910 } 4911 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4912 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4913} 4914 4915static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4916 unsigned Opcode = N->getOpcode(); 4917 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4918 SDNode *N0 = N->getOperand(0).getNode(); 4919 SDNode *N1 = N->getOperand(1).getNode(); 4920 return N0->hasOneUse() && N1->hasOneUse() && 4921 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4922 } 4923 return false; 4924} 4925 4926static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4927 unsigned Opcode = N->getOpcode(); 4928 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4929 SDNode *N0 = N->getOperand(0).getNode(); 4930 SDNode *N1 = N->getOperand(1).getNode(); 4931 return N0->hasOneUse() && N1->hasOneUse() && 4932 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4933 } 4934 return false; 4935} 4936 4937static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4938 // Multiplications are only custom-lowered for 128-bit vectors so that 4939 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4940 EVT VT = Op.getValueType(); 4941 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4942 SDNode *N0 = Op.getOperand(0).getNode(); 4943 SDNode *N1 = Op.getOperand(1).getNode(); 4944 unsigned NewOpc = 0; 4945 bool isMLA = false; 4946 bool isN0SExt = isSignExtended(N0, DAG); 4947 bool isN1SExt = isSignExtended(N1, DAG); 4948 if (isN0SExt && isN1SExt) 4949 NewOpc = ARMISD::VMULLs; 4950 else { 4951 bool isN0ZExt = isZeroExtended(N0, DAG); 4952 bool isN1ZExt = isZeroExtended(N1, DAG); 4953 if (isN0ZExt && isN1ZExt) 4954 NewOpc = ARMISD::VMULLu; 4955 else if (isN1SExt || isN1ZExt) { 4956 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4957 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4958 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4959 NewOpc = ARMISD::VMULLs; 4960 isMLA = true; 4961 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4962 NewOpc = ARMISD::VMULLu; 4963 isMLA = true; 4964 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4965 std::swap(N0, N1); 4966 NewOpc = ARMISD::VMULLu; 4967 isMLA = true; 4968 } 4969 } 4970 4971 if (!NewOpc) { 4972 if (VT == MVT::v2i64) 4973 // Fall through to expand this. It is not legal. 4974 return SDValue(); 4975 else 4976 // Other vector multiplications are legal. 4977 return Op; 4978 } 4979 } 4980 4981 // Legalize to a VMULL instruction. 
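  // As a concrete sketch of the common case: (mul (sext v4i16 a to v4i32),
  // (sext v4i16 b to v4i32)) becomes (ARMISD::VMULLs a, b), i.e. a single
  // vmull.s16, once SkipExtension strips the extensions below.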
4982 DebugLoc DL = Op.getDebugLoc(); 4983 SDValue Op0; 4984 SDValue Op1 = SkipExtension(N1, DAG); 4985 if (!isMLA) { 4986 Op0 = SkipExtension(N0, DAG); 4987 assert(Op0.getValueType().is64BitVector() && 4988 Op1.getValueType().is64BitVector() && 4989 "unexpected types for extended operands to VMULL"); 4990 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4991 } 4992 4993 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4994 // isel lowering to take advantage of no-stall back to back vmul + vmla. 4995 // vmull q0, d4, d6 4996 // vmlal q0, d5, d6 4997 // is faster than 4998 // vaddl q0, d4, d5 4999 // vmovl q1, d6 5000 // vmul q0, q0, q1 5001 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 5002 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 5003 EVT Op1VT = Op1.getValueType(); 5004 return DAG.getNode(N0->getOpcode(), DL, VT, 5005 DAG.getNode(NewOpc, DL, VT, 5006 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 5007 DAG.getNode(NewOpc, DL, VT, 5008 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 5009} 5010 5011static SDValue 5012LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 5013 // Convert to float 5014 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 5015 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 5016 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 5017 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 5018 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 5019 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 5020 // Get reciprocal estimate. 5021 // float4 recip = vrecpeq_f32(yf); 5022 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5023 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 5024 // Because char has a smaller range than uchar, we can actually get away 5025 // without any newton steps. This requires that we use a weird bias 5026 // of 0xb000, however (again, this has been exhaustively tested). 5027 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 5028 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 5029 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 5030 Y = DAG.getConstant(0xb000, MVT::i32); 5031 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 5032 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 5033 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 5034 // Convert back to short. 5035 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 5036 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 5037 return X; 5038} 5039 5040static SDValue 5041LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 5042 SDValue N2; 5043 // Convert to float. 5044 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 5045 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 5046 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 5047 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 5048 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 5049 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 5050 5051 // Use reciprocal estimate and one refinement step. 
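  // In scalar terms this is one Newton-Raphson step for 1/y (a sketch of the
  // math only): vrecpe produces an initial estimate r0 and vrecps(y, r)
  // returns (2 - y*r), so the multiply below computes
  //   r1 = r0 * (2 - y*r0)
  // which roughly doubles the number of accurate bits in the estimate.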
5052 // float4 recip = vrecpeq_f32(yf); 5053 // recip *= vrecpsq_f32(yf, recip); 5054 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5055 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 5056 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5057 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5058 N1, N2); 5059 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5060 // Because short has a smaller range than ushort, we can actually get away 5061 // with only a single newton step. This requires that we use a weird bias 5062 // of 89, however (again, this has been exhaustively tested). 5063 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 5064 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 5065 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 5066 N1 = DAG.getConstant(0x89, MVT::i32); 5067 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 5068 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 5069 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 5070 // Convert back to integer and return. 5071 // return vmovn_s32(vcvt_s32_f32(result)); 5072 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 5073 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 5074 return N0; 5075} 5076 5077static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 5078 EVT VT = Op.getValueType(); 5079 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5080 "unexpected type for custom-lowering ISD::SDIV"); 5081 5082 DebugLoc dl = Op.getDebugLoc(); 5083 SDValue N0 = Op.getOperand(0); 5084 SDValue N1 = Op.getOperand(1); 5085 SDValue N2, N3; 5086 5087 if (VT == MVT::v8i8) { 5088 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 5089 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 5090 5091 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5092 DAG.getIntPtrConstant(4)); 5093 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5094 DAG.getIntPtrConstant(4)); 5095 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5096 DAG.getIntPtrConstant(0)); 5097 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5098 DAG.getIntPtrConstant(0)); 5099 5100 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 5101 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 5102 5103 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 5104 N0 = LowerCONCAT_VECTORS(N0, DAG); 5105 5106 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 5107 return N0; 5108 } 5109 return LowerSDIV_v4i16(N0, N1, dl, DAG); 5110} 5111 5112static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 5113 EVT VT = Op.getValueType(); 5114 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5115 "unexpected type for custom-lowering ISD::UDIV"); 5116 5117 DebugLoc dl = Op.getDebugLoc(); 5118 SDValue N0 = Op.getOperand(0); 5119 SDValue N1 = Op.getOperand(1); 5120 SDValue N2, N3; 5121 5122 if (VT == MVT::v8i8) { 5123 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 5124 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 5125 5126 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5127 DAG.getIntPtrConstant(4)); 5128 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5129 DAG.getIntPtrConstant(4)); 5130 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5131 DAG.getIntPtrConstant(0)); 5132 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5133 DAG.getIntPtrConstant(0)); 5134 5135 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 5136 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 5137 5138 N0 = 
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
5139 N0 = LowerCONCAT_VECTORS(N0, DAG);
5140
5141 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
5142 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
5143 N0);
5144 return N0;
5145 }
5146
5147 // v4i16 udiv ... Convert to float.
5148 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
5149 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
5150 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
5151 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
5152 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
5153 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
5154
5155 // Use reciprocal estimate and two refinement steps.
5156 // float4 recip = vrecpeq_f32(yf);
5157 // recip *= vrecpsq_f32(yf, recip);
5158 // recip *= vrecpsq_f32(yf, recip);
5159 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5160 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
5161 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5162 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
5163 BN1, N2);
5164 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
5165 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5166 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
5167 BN1, N2);
5168 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
5169 // Simply multiplying by the reciprocal estimate can leave us a few ulps
5170 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
5171 // and that it will never cause us to return an answer too large).
5172 // float4 result = as_float4(as_int4(xf*recip) + 2);
5173 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
5174 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
5175 N1 = DAG.getConstant(2, MVT::i32);
5176 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
5177 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
5178 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
5179 // Convert back to integer and return.
5180 // return vmovn_u32(vcvt_s32_f32(result));
5181 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
5182 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
5183 return N0;
5184}
5185
5186static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
5187 EVT VT = Op.getNode()->getValueType(0);
5188 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
5189
5190 unsigned Opc;
5191 bool ExtraOp = false;
5192 switch (Op.getOpcode()) {
5193 default: llvm_unreachable("Invalid code");
5194 case ISD::ADDC: Opc = ARMISD::ADDC; break;
5195 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
5196 case ISD::SUBC: Opc = ARMISD::SUBC; break;
5197 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
5198 }
5199
5200 if (!ExtraOp)
5201 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
5202 Op.getOperand(1));
5203 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
5204 Op.getOperand(1), Op.getOperand(2));
5205}
5206
5207static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
5208 // Monotonic load/store is legal for all targets
5209 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
5210 return Op;
5211
5212 // Acquire/Release load/store is not legal for targets without a
5213 // dmb or equivalent available.
5214 return SDValue();
5215}
5216
5217
5218static void
5219ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
5220 SelectionDAG &DAG, unsigned NewOp) {
5221 DebugLoc dl = Node->getDebugLoc();
5222 assert (Node->getValueType(0) == MVT::i64 &&
5223 "Only know how to expand i64 atomics");
5224
5225 SmallVector<SDValue, 6> Ops;
5226 Ops.push_back(Node->getOperand(0)); // Chain
5227 Ops.push_back(Node->getOperand(1)); // Ptr
5228 // Low part of Val1
5229 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
5230 Node->getOperand(2), DAG.getIntPtrConstant(0)));
5231 // High part of Val1
5232 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
5233 Node->getOperand(2), DAG.getIntPtrConstant(1)));
5234 if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
5235 // Low part of Val2
5236 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
5237 Node->getOperand(3), DAG.getIntPtrConstant(0)));
5238 // High part of Val2
5239 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
5240 Node->getOperand(3), DAG.getIntPtrConstant(1)));
5241 }
5242 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5243 SDValue Result =
5244 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
5245 cast<MemSDNode>(Node)->getMemOperand());
5246 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
5247 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
5248 Results.push_back(Result.getValue(2));
5249}
5250
5251SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
5252 switch (Op.getOpcode()) {
5253 default: llvm_unreachable("Don't know how to custom lower this!");
5254 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
5255 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
5256 case ISD::GlobalAddress:
5257 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 5258 LowerGlobalAddressELF(Op, DAG); 5259 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5260 case ISD::SELECT: return LowerSELECT(Op, DAG); 5261 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5262 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 5263 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 5264 case ISD::VASTART: return LowerVASTART(Op, DAG); 5265 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 5266 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 5267 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 5268 case ISD::SINT_TO_FP: 5269 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5270 case ISD::FP_TO_SINT: 5271 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 5272 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5273 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5274 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5275 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 5276 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 5277 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 5278 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 5279 Subtarget); 5280 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 5281 case ISD::SHL: 5282 case ISD::SRL: 5283 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 5284 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 5285 case ISD::SRL_PARTS: 5286 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 5287 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 5288 case ISD::SETCC: return LowerVSETCC(Op, DAG); 5289 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 5290 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 5291 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5292 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5293 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5294 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 5295 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5296 case ISD::MUL: return LowerMUL(Op, DAG); 5297 case ISD::SDIV: return LowerSDIV(Op, DAG); 5298 case ISD::UDIV: return LowerUDIV(Op, DAG); 5299 case ISD::ADDC: 5300 case ISD::ADDE: 5301 case ISD::SUBC: 5302 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 5303 case ISD::ATOMIC_LOAD: 5304 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5305 } 5306} 5307 5308/// ReplaceNodeResults - Replace the results of node with an illegal result 5309/// type with new values built out of custom code. 
5310void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5311 SmallVectorImpl<SDValue>&Results, 5312 SelectionDAG &DAG) const { 5313 SDValue Res; 5314 switch (N->getOpcode()) { 5315 default: 5316 llvm_unreachable("Don't know how to custom expand this!"); 5317 case ISD::BITCAST: 5318 Res = ExpandBITCAST(N, DAG); 5319 break; 5320 case ISD::SRL: 5321 case ISD::SRA: 5322 Res = Expand64BitShift(N, DAG, Subtarget); 5323 break; 5324 case ISD::ATOMIC_LOAD_ADD: 5325 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5326 return; 5327 case ISD::ATOMIC_LOAD_AND: 5328 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5329 return; 5330 case ISD::ATOMIC_LOAD_NAND: 5331 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5332 return; 5333 case ISD::ATOMIC_LOAD_OR: 5334 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5335 return; 5336 case ISD::ATOMIC_LOAD_SUB: 5337 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5338 return; 5339 case ISD::ATOMIC_LOAD_XOR: 5340 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5341 return; 5342 case ISD::ATOMIC_SWAP: 5343 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5344 return; 5345 case ISD::ATOMIC_CMP_SWAP: 5346 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5347 return; 5348 } 5349 if (Res.getNode()) 5350 Results.push_back(Res); 5351} 5352 5353//===----------------------------------------------------------------------===// 5354// ARM Scheduler Hooks 5355//===----------------------------------------------------------------------===// 5356 5357MachineBasicBlock * 5358ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5359 MachineBasicBlock *BB, 5360 unsigned Size) const { 5361 unsigned dest = MI->getOperand(0).getReg(); 5362 unsigned ptr = MI->getOperand(1).getReg(); 5363 unsigned oldval = MI->getOperand(2).getReg(); 5364 unsigned newval = MI->getOperand(3).getReg(); 5365 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5366 DebugLoc dl = MI->getDebugLoc(); 5367 bool isThumb2 = Subtarget->isThumb2(); 5368 5369 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5370 unsigned scratch = MRI.createVirtualRegister(isThumb2 ? 5371 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5372 (const TargetRegisterClass*)&ARM::GPRRegClass); 5373 5374 if (isThumb2) { 5375 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5376 MRI.constrainRegClass(oldval, &ARM::rGPRRegClass); 5377 MRI.constrainRegClass(newval, &ARM::rGPRRegClass); 5378 } 5379 5380 unsigned ldrOpc, strOpc; 5381 switch (Size) { 5382 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5383 case 1: 5384 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5385 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5386 break; 5387 case 2: 5388 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5389 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5390 break; 5391 case 4: 5392 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5393 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5394 break; 5395 } 5396 5397 MachineFunction *MF = BB->getParent(); 5398 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5399 MachineFunction::iterator It = BB; 5400 ++It; // insert the new blocks after the current block 5401 5402 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5403 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5404 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5405 MF->insert(It, loop1MBB); 5406 MF->insert(It, loop2MBB); 5407 MF->insert(It, exitMBB); 5408 5409 // Transfer the remainder of BB and its successor edges to exitMBB. 5410 exitMBB->splice(exitMBB->begin(), BB, 5411 llvm::next(MachineBasicBlock::iterator(MI)), 5412 BB->end()); 5413 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5414 5415 // thisMBB: 5416 // ... 5417 // fallthrough --> loop1MBB 5418 BB->addSuccessor(loop1MBB); 5419 5420 // loop1MBB: 5421 // ldrex dest, [ptr] 5422 // cmp dest, oldval 5423 // bne exitMBB 5424 BB = loop1MBB; 5425 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5426 if (ldrOpc == ARM::t2LDREX) 5427 MIB.addImm(0); 5428 AddDefaultPred(MIB); 5429 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5430 .addReg(dest).addReg(oldval)); 5431 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5432 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5433 BB->addSuccessor(loop2MBB); 5434 BB->addSuccessor(exitMBB); 5435 5436 // loop2MBB: 5437 // strex scratch, newval, [ptr] 5438 // cmp scratch, #0 5439 // bne loop1MBB 5440 BB = loop2MBB; 5441 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5442 if (strOpc == ARM::t2STREX) 5443 MIB.addImm(0); 5444 AddDefaultPred(MIB); 5445 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5446 .addReg(scratch).addImm(0)); 5447 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5448 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5449 BB->addSuccessor(loop1MBB); 5450 BB->addSuccessor(exitMBB); 5451 5452 // exitMBB: 5453 // ... 5454 BB = exitMBB; 5455 5456 MI->eraseFromParent(); // The instruction is gone now. 5457 5458 return BB; 5459} 5460 5461MachineBasicBlock * 5462ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5463 unsigned Size, unsigned BinOpcode) const { 5464 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5465 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5466 5467 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5468 MachineFunction *MF = BB->getParent(); 5469 MachineFunction::iterator It = BB; 5470 ++It; 5471 5472 unsigned dest = MI->getOperand(0).getReg(); 5473 unsigned ptr = MI->getOperand(1).getReg(); 5474 unsigned incr = MI->getOperand(2).getReg(); 5475 DebugLoc dl = MI->getDebugLoc(); 5476 bool isThumb2 = Subtarget->isThumb2(); 5477 5478 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5479 if (isThumb2) { 5480 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5481 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5482 } 5483 5484 unsigned ldrOpc, strOpc; 5485 switch (Size) { 5486 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5487 case 1: 5488 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5489 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5490 break; 5491 case 2: 5492 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5493 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5494 break; 5495 case 4: 5496 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5497 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5498 break; 5499 } 5500 5501 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5502 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5503 MF->insert(It, loopMBB); 5504 MF->insert(It, exitMBB); 5505 5506 // Transfer the remainder of BB and its successor edges to exitMBB. 5507 exitMBB->splice(exitMBB->begin(), BB, 5508 llvm::next(MachineBasicBlock::iterator(MI)), 5509 BB->end()); 5510 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5511 5512 const TargetRegisterClass *TRC = isThumb2 ? 5513 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5514 (const TargetRegisterClass*)&ARM::GPRRegClass; 5515 unsigned scratch = MRI.createVirtualRegister(TRC); 5516 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5517 5518 // thisMBB: 5519 // ... 5520 // fallthrough --> loopMBB 5521 BB->addSuccessor(loopMBB); 5522 5523 // loopMBB: 5524 // ldrex dest, ptr 5525 // <binop> scratch2, dest, incr 5526 // strex scratch, scratch2, ptr 5527 // cmp scratch, #0 5528 // bne- loopMBB 5529 // fallthrough --> exitMBB 5530 BB = loopMBB; 5531 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5532 if (ldrOpc == ARM::t2LDREX) 5533 MIB.addImm(0); 5534 AddDefaultPred(MIB); 5535 if (BinOpcode) { 5536 // operand order needs to go the other way for NAND 5537 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5538 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5539 addReg(incr).addReg(dest)).addReg(0); 5540 else 5541 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5542 addReg(dest).addReg(incr)).addReg(0); 5543 } 5544 5545 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5546 if (strOpc == ARM::t2STREX) 5547 MIB.addImm(0); 5548 AddDefaultPred(MIB); 5549 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5550 .addReg(scratch).addImm(0)); 5551 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5552 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5553 5554 BB->addSuccessor(loopMBB); 5555 BB->addSuccessor(exitMBB); 5556 5557 // exitMBB: 5558 // ... 5559 BB = exitMBB; 5560 5561 MI->eraseFromParent(); // The instruction is gone now. 5562 5563 return BB; 5564} 5565 5566MachineBasicBlock * 5567ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5568 MachineBasicBlock *BB, 5569 unsigned Size, 5570 bool signExtend, 5571 ARMCC::CondCodes Cond) const { 5572 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5573 5574 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5575 MachineFunction *MF = BB->getParent(); 5576 MachineFunction::iterator It = BB; 5577 ++It; 5578 5579 unsigned dest = MI->getOperand(0).getReg(); 5580 unsigned ptr = MI->getOperand(1).getReg(); 5581 unsigned incr = MI->getOperand(2).getReg(); 5582 unsigned oldval = dest; 5583 DebugLoc dl = MI->getDebugLoc(); 5584 bool isThumb2 = Subtarget->isThumb2(); 5585 5586 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5587 if (isThumb2) { 5588 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5589 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5590 } 5591 5592 unsigned ldrOpc, strOpc, extendOpc; 5593 switch (Size) { 5594 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5595 case 1: 5596 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5597 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5598 extendOpc = isThumb2 ? 
ARM::t2SXTB : ARM::SXTB; 5599 break; 5600 case 2: 5601 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5602 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5603 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5604 break; 5605 case 4: 5606 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5607 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5608 extendOpc = 0; 5609 break; 5610 } 5611 5612 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5613 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5614 MF->insert(It, loopMBB); 5615 MF->insert(It, exitMBB); 5616 5617 // Transfer the remainder of BB and its successor edges to exitMBB. 5618 exitMBB->splice(exitMBB->begin(), BB, 5619 llvm::next(MachineBasicBlock::iterator(MI)), 5620 BB->end()); 5621 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5622 5623 const TargetRegisterClass *TRC = isThumb2 ? 5624 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5625 (const TargetRegisterClass*)&ARM::GPRRegClass; 5626 unsigned scratch = MRI.createVirtualRegister(TRC); 5627 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5628 5629 // thisMBB: 5630 // ... 5631 // fallthrough --> loopMBB 5632 BB->addSuccessor(loopMBB); 5633 5634 // loopMBB: 5635 // ldrex dest, ptr 5636 // (sign extend dest, if required) 5637 // cmp dest, incr 5638 // cmov.cond scratch2, incr, dest 5639 // strex scratch, scratch2, ptr 5640 // cmp scratch, #0 5641 // bne- loopMBB 5642 // fallthrough --> exitMBB 5643 BB = loopMBB; 5644 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5645 if (ldrOpc == ARM::t2LDREX) 5646 MIB.addImm(0); 5647 AddDefaultPred(MIB); 5648 5649 // Sign extend the value, if necessary. 5650 if (signExtend && extendOpc) { 5651 oldval = MRI.createVirtualRegister(&ARM::GPRRegClass); 5652 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5653 .addReg(dest) 5654 .addImm(0)); 5655 } 5656 5657 // Build compare and cmov instructions. 5658 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5659 .addReg(oldval).addReg(incr)); 5660 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5661 .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR); 5662 5663 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5664 if (strOpc == ARM::t2STREX) 5665 MIB.addImm(0); 5666 AddDefaultPred(MIB); 5667 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5668 .addReg(scratch).addImm(0)); 5669 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5670 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5671 5672 BB->addSuccessor(loopMBB); 5673 BB->addSuccessor(exitMBB); 5674 5675 // exitMBB: 5676 // ... 5677 BB = exitMBB; 5678 5679 MI->eraseFromParent(); // The instruction is gone now. 5680 5681 return BB; 5682} 5683 5684MachineBasicBlock * 5685ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5686 unsigned Op1, unsigned Op2, 5687 bool NeedsCarry, bool IsCmpxchg) const { 5688 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
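  // As an illustration only (the actual opcodes are chosen by the caller):
  // for a 64-bit atomic add, Op1/Op2 would be an ADDS/ADC style pair with
  // NeedsCarry set, so the loop built below looks roughly like
  //   loop: ldrexd r2, r3, ptr
  //         adds   r0, r2, vallo
  //         adc    r1, r3, valhi
  //         strexd storesuccess, r0, r1, ptr
  //         cmp    storesuccess, #0
  //         bne-   loop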
5689 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5690 5691 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5692 MachineFunction *MF = BB->getParent(); 5693 MachineFunction::iterator It = BB; 5694 ++It; 5695 5696 unsigned destlo = MI->getOperand(0).getReg(); 5697 unsigned desthi = MI->getOperand(1).getReg(); 5698 unsigned ptr = MI->getOperand(2).getReg(); 5699 unsigned vallo = MI->getOperand(3).getReg(); 5700 unsigned valhi = MI->getOperand(4).getReg(); 5701 DebugLoc dl = MI->getDebugLoc(); 5702 bool isThumb2 = Subtarget->isThumb2(); 5703 5704 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5705 if (isThumb2) { 5706 MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); 5707 MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); 5708 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5709 } 5710 5711 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5712 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5713 5714 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5715 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5716 if (IsCmpxchg) { 5717 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5718 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5719 } 5720 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5721 MF->insert(It, loopMBB); 5722 if (IsCmpxchg) { 5723 MF->insert(It, contBB); 5724 MF->insert(It, cont2BB); 5725 } 5726 MF->insert(It, exitMBB); 5727 5728 // Transfer the remainder of BB and its successor edges to exitMBB. 5729 exitMBB->splice(exitMBB->begin(), BB, 5730 llvm::next(MachineBasicBlock::iterator(MI)), 5731 BB->end()); 5732 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5733 5734 const TargetRegisterClass *TRC = isThumb2 ? 5735 (const TargetRegisterClass*)&ARM::tGPRRegClass : 5736 (const TargetRegisterClass*)&ARM::GPRRegClass; 5737 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5738 5739 // thisMBB: 5740 // ... 5741 // fallthrough --> loopMBB 5742 BB->addSuccessor(loopMBB); 5743 5744 // loopMBB: 5745 // ldrexd r2, r3, ptr 5746 // <binopa> r0, r2, incr 5747 // <binopb> r1, r3, incr 5748 // strexd storesuccess, r0, r1, ptr 5749 // cmp storesuccess, #0 5750 // bne- loopMBB 5751 // fallthrough --> exitMBB 5752 // 5753 // Note that the registers are explicitly specified because there is not any 5754 // way to force the register allocator to allocate a register pair. 5755 // 5756 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5757 // need to properly enforce the restriction that the two output registers 5758 // for ldrexd must be different. 5759 BB = loopMBB; 5760 // Load 5761 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5762 .addReg(ARM::R2, RegState::Define) 5763 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5764 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5765 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5766 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5767 5768 if (IsCmpxchg) { 5769 // Add early exit 5770 for (unsigned i = 0; i < 2; i++) { 5771 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5772 ARM::CMPrr)) 5773 .addReg(i == 0 ? destlo : desthi) 5774 .addReg(i == 0 ? vallo : valhi)); 5775 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5776 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5777 BB->addSuccessor(exitMBB); 5778 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5779 BB = (i == 0 ? 
contBB : cont2BB); 5780 } 5781 5782 // Copy to physregs for strexd 5783 unsigned setlo = MI->getOperand(5).getReg(); 5784 unsigned sethi = MI->getOperand(6).getReg(); 5785 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5786 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5787 } else if (Op1) { 5788 // Perform binary operation 5789 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5790 .addReg(destlo).addReg(vallo)) 5791 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5792 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5793 .addReg(desthi).addReg(valhi)).addReg(0); 5794 } else { 5795 // Copy to physregs for strexd 5796 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5797 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5798 } 5799 5800 // Store 5801 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5802 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5803 // Cmp+jump 5804 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5805 .addReg(storesuccess).addImm(0)); 5806 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5807 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5808 5809 BB->addSuccessor(loopMBB); 5810 BB->addSuccessor(exitMBB); 5811 5812 // exitMBB: 5813 // ... 5814 BB = exitMBB; 5815 5816 MI->eraseFromParent(); // The instruction is gone now. 5817 5818 return BB; 5819} 5820 5821/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5822/// registers the function context. 5823void ARMTargetLowering:: 5824SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5825 MachineBasicBlock *DispatchBB, int FI) const { 5826 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5827 DebugLoc dl = MI->getDebugLoc(); 5828 MachineFunction *MF = MBB->getParent(); 5829 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5830 MachineConstantPool *MCP = MF->getConstantPool(); 5831 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5832 const Function *F = MF->getFunction(); 5833 5834 bool isThumb = Subtarget->isThumb(); 5835 bool isThumb2 = Subtarget->isThumb2(); 5836 5837 unsigned PCLabelId = AFI->createPICLabelUId(); 5838 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5839 ARMConstantPoolValue *CPV = 5840 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5841 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5842 5843 const TargetRegisterClass *TRC = isThumb ? 5844 (const TargetRegisterClass*)&ARM::tGPRRegClass : 5845 (const TargetRegisterClass*)&ARM::GPRRegClass; 5846 5847 // Grab constant pool and fixed stack memory operands. 5848 MachineMemOperand *CPMMO = 5849 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5850 MachineMemOperand::MOLoad, 4, 4); 5851 5852 MachineMemOperand *FIMMOSt = 5853 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5854 MachineMemOperand::MOStore, 4, 4); 5855 5856 // Load the address of the dispatch MBB into the jump buffer. 5857 if (isThumb2) { 5858 // Incoming value: jbuf 5859 // ldr.n r5, LCPI1_1 5860 // orr r5, r5, #1 5861 // add r5, pc 5862 // str r5, [$jbuf, #+4] ; &jbuf[1] 5863 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5864 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 5865 .addConstantPoolIndex(CPI) 5866 .addMemOperand(CPMMO)); 5867 // Set the low bit because of thumb mode. 
5868 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5869 AddDefaultCC( 5870 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 5871 .addReg(NewVReg1, RegState::Kill) 5872 .addImm(0x01))); 5873 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5874 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 5875 .addReg(NewVReg2, RegState::Kill) 5876 .addImm(PCLabelId); 5877 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 5878 .addReg(NewVReg3, RegState::Kill) 5879 .addFrameIndex(FI) 5880 .addImm(36) // &jbuf[1] :: pc 5881 .addMemOperand(FIMMOSt)); 5882 } else if (isThumb) { 5883 // Incoming value: jbuf 5884 // ldr.n r1, LCPI1_4 5885 // add r1, pc 5886 // mov r2, #1 5887 // orrs r1, r2 5888 // add r2, $jbuf, #+4 ; &jbuf[1] 5889 // str r1, [r2] 5890 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5891 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 5892 .addConstantPoolIndex(CPI) 5893 .addMemOperand(CPMMO)); 5894 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5895 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 5896 .addReg(NewVReg1, RegState::Kill) 5897 .addImm(PCLabelId); 5898 // Set the low bit because of thumb mode. 5899 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5900 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 5901 .addReg(ARM::CPSR, RegState::Define) 5902 .addImm(1)); 5903 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5904 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 5905 .addReg(ARM::CPSR, RegState::Define) 5906 .addReg(NewVReg2, RegState::Kill) 5907 .addReg(NewVReg3, RegState::Kill)); 5908 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5909 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 5910 .addFrameIndex(FI) 5911 .addImm(36)); // &jbuf[1] :: pc 5912 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 5913 .addReg(NewVReg4, RegState::Kill) 5914 .addReg(NewVReg5, RegState::Kill) 5915 .addImm(0) 5916 .addMemOperand(FIMMOSt)); 5917 } else { 5918 // Incoming value: jbuf 5919 // ldr r1, LCPI1_1 5920 // add r1, pc, r1 5921 // str r1, [$jbuf, #+4] ; &jbuf[1] 5922 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5923 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 5924 .addConstantPoolIndex(CPI) 5925 .addImm(0) 5926 .addMemOperand(CPMMO)); 5927 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5928 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 5929 .addReg(NewVReg1, RegState::Kill) 5930 .addImm(PCLabelId)); 5931 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 5932 .addReg(NewVReg2, RegState::Kill) 5933 .addFrameIndex(FI) 5934 .addImm(36) // &jbuf[1] :: pc 5935 .addMemOperand(FIMMOSt)); 5936 } 5937} 5938 5939MachineBasicBlock *ARMTargetLowering:: 5940EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 5941 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5942 DebugLoc dl = MI->getDebugLoc(); 5943 MachineFunction *MF = MBB->getParent(); 5944 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5945 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5946 MachineFrameInfo *MFI = MF->getFrameInfo(); 5947 int FI = MFI->getFunctionContextIndex(); 5948 5949 const TargetRegisterClass *TRC = Subtarget->isThumb() ? 
5950 (const TargetRegisterClass*)&ARM::tGPRRegClass : 5951 (const TargetRegisterClass*)&ARM::GPRnopcRegClass; 5952 5953 // Get a mapping of the call site numbers to all of the landing pads they're 5954 // associated with. 5955 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 5956 unsigned MaxCSNum = 0; 5957 MachineModuleInfo &MMI = MF->getMMI(); 5958 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 5959 ++BB) { 5960 if (!BB->isLandingPad()) continue; 5961 5962 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 5963 // pad. 5964 for (MachineBasicBlock::iterator 5965 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 5966 if (!II->isEHLabel()) continue; 5967 5968 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 5969 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 5970 5971 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 5972 for (SmallVectorImpl<unsigned>::iterator 5973 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 5974 CSI != CSE; ++CSI) { 5975 CallSiteNumToLPad[*CSI].push_back(BB); 5976 MaxCSNum = std::max(MaxCSNum, *CSI); 5977 } 5978 break; 5979 } 5980 } 5981 5982 // Get an ordered list of the machine basic blocks for the jump table. 5983 std::vector<MachineBasicBlock*> LPadList; 5984 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 5985 LPadList.reserve(CallSiteNumToLPad.size()); 5986 for (unsigned I = 1; I <= MaxCSNum; ++I) { 5987 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 5988 for (SmallVectorImpl<MachineBasicBlock*>::iterator 5989 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 5990 LPadList.push_back(*II); 5991 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 5992 } 5993 } 5994 5995 assert(!LPadList.empty() && 5996 "No landing pad destinations for the dispatch jump table!"); 5997 5998 // Create the jump table and associated information. 5999 MachineJumpTableInfo *JTI = 6000 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 6001 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 6002 unsigned UId = AFI->createJumpTableUId(); 6003 6004 // Create the MBBs for the dispatch code. 6005 6006 // Shove the dispatch's address into the return slot in the function context. 6007 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 6008 DispatchBB->setIsLandingPad(); 6009 6010 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 6011 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 6012 DispatchBB->addSuccessor(TrapBB); 6013 6014 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 6015 DispatchBB->addSuccessor(DispContBB); 6016 6017 // Insert and MBBs. 6018 MF->insert(MF->end(), DispatchBB); 6019 MF->insert(MF->end(), DispContBB); 6020 MF->insert(MF->end(), TrapBB); 6021 6022 // Insert code into the entry block that creates and registers the function 6023 // context. 
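  // SetupEntryBlockForSjLj (defined above) stores the address of DispatchBB
  // into the function-context slot at offset 36 (&jbuf[1], the saved PC), so
  // the dispatch code built below is presumably the resume point for SjLj
  // unwinds.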
6024 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 6025 6026 MachineMemOperand *FIMMOLd = 6027 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6028 MachineMemOperand::MOLoad | 6029 MachineMemOperand::MOVolatile, 4, 4); 6030 6031 if (AFI->isThumb1OnlyFunction()) 6032 BuildMI(DispatchBB, dl, TII->get(ARM::tInt_eh_sjlj_dispatchsetup)); 6033 else if (!Subtarget->hasVFP2()) 6034 BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup_nofp)); 6035 else 6036 BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 6037 6038 unsigned NumLPads = LPadList.size(); 6039 if (Subtarget->isThumb2()) { 6040 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6041 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 6042 .addFrameIndex(FI) 6043 .addImm(4) 6044 .addMemOperand(FIMMOLd)); 6045 6046 if (NumLPads < 256) { 6047 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 6048 .addReg(NewVReg1) 6049 .addImm(LPadList.size())); 6050 } else { 6051 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6052 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 6053 .addImm(NumLPads & 0xFFFF)); 6054 6055 unsigned VReg2 = VReg1; 6056 if ((NumLPads & 0xFFFF0000) != 0) { 6057 VReg2 = MRI->createVirtualRegister(TRC); 6058 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 6059 .addReg(VReg1) 6060 .addImm(NumLPads >> 16)); 6061 } 6062 6063 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 6064 .addReg(NewVReg1) 6065 .addReg(VReg2)); 6066 } 6067 6068 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 6069 .addMBB(TrapBB) 6070 .addImm(ARMCC::HI) 6071 .addReg(ARM::CPSR); 6072 6073 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6074 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 6075 .addJumpTableIndex(MJTI) 6076 .addImm(UId)); 6077 6078 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6079 AddDefaultCC( 6080 AddDefaultPred( 6081 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 6082 .addReg(NewVReg3, RegState::Kill) 6083 .addReg(NewVReg1) 6084 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6085 6086 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 6087 .addReg(NewVReg4, RegState::Kill) 6088 .addReg(NewVReg1) 6089 .addJumpTableIndex(MJTI) 6090 .addImm(UId); 6091 } else if (Subtarget->isThumb()) { 6092 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6093 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 6094 .addFrameIndex(FI) 6095 .addImm(1) 6096 .addMemOperand(FIMMOLd)); 6097 6098 if (NumLPads < 256) { 6099 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 6100 .addReg(NewVReg1) 6101 .addImm(NumLPads)); 6102 } else { 6103 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6104 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6105 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6106 6107 // MachineConstantPool wants an explicit alignment. 
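      // Use the preferred i32 alignment if the data layout specifies one;
      // otherwise fall back to the constant's allocation size.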
6108 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6109 if (Align == 0) 6110 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6111 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6112 6113 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6114 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 6115 .addReg(VReg1, RegState::Define) 6116 .addConstantPoolIndex(Idx)); 6117 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 6118 .addReg(NewVReg1) 6119 .addReg(VReg1)); 6120 } 6121 6122 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 6123 .addMBB(TrapBB) 6124 .addImm(ARMCC::HI) 6125 .addReg(ARM::CPSR); 6126 6127 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6128 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 6129 .addReg(ARM::CPSR, RegState::Define) 6130 .addReg(NewVReg1) 6131 .addImm(2)); 6132 6133 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6134 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 6135 .addJumpTableIndex(MJTI) 6136 .addImm(UId)); 6137 6138 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6139 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 6140 .addReg(ARM::CPSR, RegState::Define) 6141 .addReg(NewVReg2, RegState::Kill) 6142 .addReg(NewVReg3)); 6143 6144 MachineMemOperand *JTMMOLd = 6145 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6146 MachineMemOperand::MOLoad, 4, 4); 6147 6148 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6149 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 6150 .addReg(NewVReg4, RegState::Kill) 6151 .addImm(0) 6152 .addMemOperand(JTMMOLd)); 6153 6154 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 6155 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 6156 .addReg(ARM::CPSR, RegState::Define) 6157 .addReg(NewVReg5, RegState::Kill) 6158 .addReg(NewVReg3)); 6159 6160 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 6161 .addReg(NewVReg6, RegState::Kill) 6162 .addJumpTableIndex(MJTI) 6163 .addImm(UId); 6164 } else { 6165 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6166 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 6167 .addFrameIndex(FI) 6168 .addImm(4) 6169 .addMemOperand(FIMMOLd)); 6170 6171 if (NumLPads < 256) { 6172 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 6173 .addReg(NewVReg1) 6174 .addImm(NumLPads)); 6175 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 6176 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6177 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 6178 .addImm(NumLPads & 0xFFFF)); 6179 6180 unsigned VReg2 = VReg1; 6181 if ((NumLPads & 0xFFFF0000) != 0) { 6182 VReg2 = MRI->createVirtualRegister(TRC); 6183 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 6184 .addReg(VReg1) 6185 .addImm(NumLPads >> 16)); 6186 } 6187 6188 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6189 .addReg(NewVReg1) 6190 .addReg(VReg2)); 6191 } else { 6192 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6193 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6194 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6195 6196 // MachineConstantPool wants an explicit alignment. 
6197 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6198 if (Align == 0) 6199 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6200 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6201 6202 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6203 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 6204 .addReg(VReg1, RegState::Define) 6205 .addConstantPoolIndex(Idx) 6206 .addImm(0)); 6207 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6208 .addReg(NewVReg1) 6209 .addReg(VReg1, RegState::Kill)); 6210 } 6211 6212 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 6213 .addMBB(TrapBB) 6214 .addImm(ARMCC::HI) 6215 .addReg(ARM::CPSR); 6216 6217 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6218 AddDefaultCC( 6219 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 6220 .addReg(NewVReg1) 6221 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6222 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6223 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 6224 .addJumpTableIndex(MJTI) 6225 .addImm(UId)); 6226 6227 MachineMemOperand *JTMMOLd = 6228 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6229 MachineMemOperand::MOLoad, 4, 4); 6230 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6231 AddDefaultPred( 6232 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 6233 .addReg(NewVReg3, RegState::Kill) 6234 .addReg(NewVReg4) 6235 .addImm(0) 6236 .addMemOperand(JTMMOLd)); 6237 6238 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 6239 .addReg(NewVReg5, RegState::Kill) 6240 .addReg(NewVReg4) 6241 .addJumpTableIndex(MJTI) 6242 .addImm(UId); 6243 } 6244 6245 // Add the jump table entries as successors to the MBB. 6246 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 6247 for (std::vector<MachineBasicBlock*>::iterator 6248 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 6249 MachineBasicBlock *CurMBB = *I; 6250 if (SeenMBBs.insert(CurMBB)) 6251 DispContBB->addSuccessor(CurMBB); 6252 } 6253 6254 // N.B. the order the invoke BBs are processed in doesn't matter here. 6255 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6256 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6257 const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF); 6258 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6259 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6260 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6261 MachineBasicBlock *BB = *I; 6262 6263 // Remove the landing pad successor from the invoke block and replace it 6264 // with the new dispatch block. 6265 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6266 BB->succ_end()); 6267 while (!Successors.empty()) { 6268 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6269 if (SMBB->isLandingPad()) { 6270 BB->removeSuccessor(SMBB); 6271 MBBLPads.push_back(SMBB); 6272 } 6273 } 6274 6275 BB->addSuccessor(DispatchBB); 6276 6277 // Find the invoke call and mark all of the callee-saved registers as 6278 // 'implicit defined' so that they're spilled. This prevents code from 6279 // moving instructions to before the EH block, where they will never be 6280 // executed. 
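    // Only registers in the GPR class usable in the current mode are marked
    // (tGPR/hGPR for Thumb2, tGPR for Thumb1, GPR for ARM); registers already
    // appearing as operands of the call are skipped.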
6281 for (MachineBasicBlock::reverse_iterator 6282 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6283 if (!II->isCall()) continue; 6284 6285 DenseMap<unsigned, bool> DefRegs; 6286 for (MachineInstr::mop_iterator 6287 OI = II->operands_begin(), OE = II->operands_end(); 6288 OI != OE; ++OI) { 6289 if (!OI->isReg()) continue; 6290 DefRegs[OI->getReg()] = true; 6291 } 6292 6293 MachineInstrBuilder MIB(&*II); 6294 6295 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6296 unsigned Reg = SavedRegs[i]; 6297 if (Subtarget->isThumb2() && 6298 !ARM::tGPRRegClass.contains(Reg) && 6299 !ARM::hGPRRegClass.contains(Reg)) 6300 continue; 6301 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 6302 continue; 6303 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 6304 continue; 6305 if (!DefRegs[Reg]) 6306 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6307 } 6308 6309 break; 6310 } 6311 } 6312 6313 // Mark all former landing pads as non-landing pads. The dispatch is the only 6314 // landing pad now. 6315 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6316 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6317 (*I)->setIsLandingPad(false); 6318 6319 // The instruction is gone now. 6320 MI->eraseFromParent(); 6321 6322 return MBB; 6323} 6324 6325static 6326MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6327 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6328 E = MBB->succ_end(); I != E; ++I) 6329 if (*I != Succ) 6330 return *I; 6331 llvm_unreachable("Expecting a BB with two successors!"); 6332} 6333 6334MachineBasicBlock *ARMTargetLowering:: 6335EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { 6336 // This pseudo instruction has 3 operands: dst, src, size 6337 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 6338 // Otherwise, we will generate unrolled scalar copies. 6339 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6340 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6341 MachineFunction::iterator It = BB; 6342 ++It; 6343 6344 unsigned dest = MI->getOperand(0).getReg(); 6345 unsigned src = MI->getOperand(1).getReg(); 6346 unsigned SizeVal = MI->getOperand(2).getImm(); 6347 unsigned Align = MI->getOperand(3).getImm(); 6348 DebugLoc dl = MI->getDebugLoc(); 6349 6350 bool isThumb2 = Subtarget->isThumb2(); 6351 MachineFunction *MF = BB->getParent(); 6352 MachineRegisterInfo &MRI = MF->getRegInfo(); 6353 unsigned ldrOpc, strOpc, UnitSize = 0; 6354 6355 const TargetRegisterClass *TRC = isThumb2 ? 6356 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6357 (const TargetRegisterClass*)&ARM::GPRRegClass; 6358 const TargetRegisterClass *TRC_Vec = 0; 6359 6360 if (Align & 1) { 6361 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6362 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6363 UnitSize = 1; 6364 } else if (Align & 2) { 6365 ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST; 6366 strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST; 6367 UnitSize = 2; 6368 } else { 6369 // Check whether we can use NEON instructions. 6370 if (!MF->getFunction()->getFnAttributes(). 
6371 hasAttribute(Attributes::NoImplicitFloat) && 6372 Subtarget->hasNEON()) { 6373 if ((Align % 16 == 0) && SizeVal >= 16) { 6374 ldrOpc = ARM::VLD1q32wb_fixed; 6375 strOpc = ARM::VST1q32wb_fixed; 6376 UnitSize = 16; 6377 TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass; 6378 } 6379 else if ((Align % 8 == 0) && SizeVal >= 8) { 6380 ldrOpc = ARM::VLD1d32wb_fixed; 6381 strOpc = ARM::VST1d32wb_fixed; 6382 UnitSize = 8; 6383 TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass; 6384 } 6385 } 6386 // Can't use NEON instructions. 6387 if (UnitSize == 0) { 6388 ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 6389 strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM; 6390 UnitSize = 4; 6391 } 6392 } 6393 6394 unsigned BytesLeft = SizeVal % UnitSize; 6395 unsigned LoopSize = SizeVal - BytesLeft; 6396 6397 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 6398 // Use LDR and STR to copy. 6399 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 6400 // [destOut] = STR_POST(scratch, destIn, UnitSize) 6401 unsigned srcIn = src; 6402 unsigned destIn = dest; 6403 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 6404 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC); 6405 unsigned srcOut = MRI.createVirtualRegister(TRC); 6406 unsigned destOut = MRI.createVirtualRegister(TRC); 6407 if (UnitSize >= 8) { 6408 AddDefaultPred(BuildMI(*BB, MI, dl, 6409 TII->get(ldrOpc), scratch) 6410 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0)); 6411 6412 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6413 .addReg(destIn).addImm(0).addReg(scratch)); 6414 } else if (isThumb2) { 6415 AddDefaultPred(BuildMI(*BB, MI, dl, 6416 TII->get(ldrOpc), scratch) 6417 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize)); 6418 6419 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6420 .addReg(scratch).addReg(destIn) 6421 .addImm(UnitSize)); 6422 } else { 6423 AddDefaultPred(BuildMI(*BB, MI, dl, 6424 TII->get(ldrOpc), scratch) 6425 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0) 6426 .addImm(UnitSize)); 6427 6428 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6429 .addReg(scratch).addReg(destIn) 6430 .addReg(0).addImm(UnitSize)); 6431 } 6432 srcIn = srcOut; 6433 destIn = destOut; 6434 } 6435 6436 // Handle the leftover bytes with LDRB and STRB. 6437 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 6438 // [destOut] = STRB_POST(scratch, destIn, 1) 6439 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6440 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6441 for (unsigned i = 0; i < BytesLeft; i++) { 6442 unsigned scratch = MRI.createVirtualRegister(TRC); 6443 unsigned srcOut = MRI.createVirtualRegister(TRC); 6444 unsigned destOut = MRI.createVirtualRegister(TRC); 6445 if (isThumb2) { 6446 AddDefaultPred(BuildMI(*BB, MI, dl, 6447 TII->get(ldrOpc),scratch) 6448 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 6449 6450 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6451 .addReg(scratch).addReg(destIn) 6452 .addReg(0).addImm(1)); 6453 } else { 6454 AddDefaultPred(BuildMI(*BB, MI, dl, 6455 TII->get(ldrOpc),scratch) 6456 .addReg(srcOut, RegState::Define).addReg(srcIn) 6457 .addReg(0).addImm(1)); 6458 6459 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6460 .addReg(scratch).addReg(destIn) 6461 .addReg(0).addImm(1)); 6462 } 6463 srcIn = srcOut; 6464 destIn = destOut; 6465 } 6466 MI->eraseFromParent(); // The instruction is gone now. 
6467 return BB; 6468 } 6469 6470 // Expand the pseudo op to a loop. 6471 // thisMBB: 6472 // ... 6473 // movw varEnd, # --> with thumb2 6474 // movt varEnd, # 6475 // ldrcp varEnd, idx --> without thumb2 6476 // fallthrough --> loopMBB 6477 // loopMBB: 6478 // PHI varPhi, varEnd, varLoop 6479 // PHI srcPhi, src, srcLoop 6480 // PHI destPhi, dst, destLoop 6481 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 6482 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 6483 // subs varLoop, varPhi, #UnitSize 6484 // bne loopMBB 6485 // fallthrough --> exitMBB 6486 // exitMBB: 6487 // epilogue to handle left-over bytes 6488 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 6489 // [destOut] = STRB_POST(scratch, destLoop, 1) 6490 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6491 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6492 MF->insert(It, loopMBB); 6493 MF->insert(It, exitMBB); 6494 6495 // Transfer the remainder of BB and its successor edges to exitMBB. 6496 exitMBB->splice(exitMBB->begin(), BB, 6497 llvm::next(MachineBasicBlock::iterator(MI)), 6498 BB->end()); 6499 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6500 6501 // Load an immediate to varEnd. 6502 unsigned varEnd = MRI.createVirtualRegister(TRC); 6503 if (isThumb2) { 6504 unsigned VReg1 = varEnd; 6505 if ((LoopSize & 0xFFFF0000) != 0) 6506 VReg1 = MRI.createVirtualRegister(TRC); 6507 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1) 6508 .addImm(LoopSize & 0xFFFF)); 6509 6510 if ((LoopSize & 0xFFFF0000) != 0) 6511 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd) 6512 .addReg(VReg1) 6513 .addImm(LoopSize >> 16)); 6514 } else { 6515 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6516 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6517 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 6518 6519 // MachineConstantPool wants an explicit alignment. 6520 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6521 if (Align == 0) 6522 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6523 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6524 6525 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp)) 6526 .addReg(varEnd, RegState::Define) 6527 .addConstantPoolIndex(Idx) 6528 .addImm(0)); 6529 } 6530 BB->addSuccessor(loopMBB); 6531 6532 // Generate the loop body: 6533 // varPhi = PHI(varLoop, varEnd) 6534 // srcPhi = PHI(srcLoop, src) 6535 // destPhi = PHI(destLoop, dst) 6536 MachineBasicBlock *entryBB = BB; 6537 BB = loopMBB; 6538 unsigned varLoop = MRI.createVirtualRegister(TRC); 6539 unsigned varPhi = MRI.createVirtualRegister(TRC); 6540 unsigned srcLoop = MRI.createVirtualRegister(TRC); 6541 unsigned srcPhi = MRI.createVirtualRegister(TRC); 6542 unsigned destLoop = MRI.createVirtualRegister(TRC); 6543 unsigned destPhi = MRI.createVirtualRegister(TRC); 6544 6545 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) 6546 .addReg(varLoop).addMBB(loopMBB) 6547 .addReg(varEnd).addMBB(entryBB); 6548 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) 6549 .addReg(srcLoop).addMBB(loopMBB) 6550 .addReg(src).addMBB(entryBB); 6551 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) 6552 .addReg(destLoop).addMBB(loopMBB) 6553 .addReg(dest).addMBB(entryBB); 6554 6555 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 6556 // [destLoop] = STR_POST(scratch, destPhi, UnitSiz) 6557 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? 
TRC_Vec:TRC); 6558 if (UnitSize >= 8) { 6559 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6560 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0)); 6561 6562 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6563 .addReg(destPhi).addImm(0).addReg(scratch)); 6564 } else if (isThumb2) { 6565 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6566 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize)); 6567 6568 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6569 .addReg(scratch).addReg(destPhi) 6570 .addImm(UnitSize)); 6571 } else { 6572 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6573 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0) 6574 .addImm(UnitSize)); 6575 6576 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6577 .addReg(scratch).addReg(destPhi) 6578 .addReg(0).addImm(UnitSize)); 6579 } 6580 6581 // Decrement loop variable by UnitSize. 6582 MachineInstrBuilder MIB = BuildMI(BB, dl, 6583 TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); 6584 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize))); 6585 MIB->getOperand(5).setReg(ARM::CPSR); 6586 MIB->getOperand(5).setIsDef(true); 6587 6588 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6589 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6590 6591 // loopMBB can loop back to loopMBB or fall through to exitMBB. 6592 BB->addSuccessor(loopMBB); 6593 BB->addSuccessor(exitMBB); 6594 6595 // Add epilogue to handle BytesLeft. 6596 BB = exitMBB; 6597 MachineInstr *StartOfExit = exitMBB->begin(); 6598 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6599 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6600 6601 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 6602 // [destOut] = STRB_POST(scratch, destLoop, 1) 6603 unsigned srcIn = srcLoop; 6604 unsigned destIn = destLoop; 6605 for (unsigned i = 0; i < BytesLeft; i++) { 6606 unsigned scratch = MRI.createVirtualRegister(TRC); 6607 unsigned srcOut = MRI.createVirtualRegister(TRC); 6608 unsigned destOut = MRI.createVirtualRegister(TRC); 6609 if (isThumb2) { 6610 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 6611 TII->get(ldrOpc),scratch) 6612 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 6613 6614 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 6615 .addReg(scratch).addReg(destIn) 6616 .addImm(1)); 6617 } else { 6618 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 6619 TII->get(ldrOpc),scratch) 6620 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1)); 6621 6622 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 6623 .addReg(scratch).addReg(destIn) 6624 .addReg(0).addImm(1)); 6625 } 6626 srcIn = srcOut; 6627 destIn = destOut; 6628 } 6629 6630 MI->eraseFromParent(); // The instruction is gone now. 6631 return BB; 6632} 6633 6634MachineBasicBlock * 6635ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6636 MachineBasicBlock *BB) const { 6637 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6638 DebugLoc dl = MI->getDebugLoc(); 6639 bool isThumb2 = Subtarget->isThumb2(); 6640 switch (MI->getOpcode()) { 6641 default: { 6642 MI->dump(); 6643 llvm_unreachable("Unexpected instr type to insert"); 6644 } 6645 // The Thumb2 pre-indexed stores have the same MI operands, they just 6646 // define them differently in the .td files from the isel patterns, so 6647 // they need pseudos. 
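  // The cases below simply retarget each pseudo to the real pre-indexed
  // opcode by swapping its descriptor; the operands are left untouched.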
6648 case ARM::t2STR_preidx: 6649 MI->setDesc(TII->get(ARM::t2STR_PRE)); 6650 return BB; 6651 case ARM::t2STRB_preidx: 6652 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 6653 return BB; 6654 case ARM::t2STRH_preidx: 6655 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 6656 return BB; 6657 6658 case ARM::STRi_preidx: 6659 case ARM::STRBi_preidx: { 6660 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 6661 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 6662 // Decode the offset. 6663 unsigned Offset = MI->getOperand(4).getImm(); 6664 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 6665 Offset = ARM_AM::getAM2Offset(Offset); 6666 if (isSub) 6667 Offset = -Offset; 6668 6669 MachineMemOperand *MMO = *MI->memoperands_begin(); 6670 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 6671 .addOperand(MI->getOperand(0)) // Rn_wb 6672 .addOperand(MI->getOperand(1)) // Rt 6673 .addOperand(MI->getOperand(2)) // Rn 6674 .addImm(Offset) // offset (skip GPR==zero_reg) 6675 .addOperand(MI->getOperand(5)) // pred 6676 .addOperand(MI->getOperand(6)) 6677 .addMemOperand(MMO); 6678 MI->eraseFromParent(); 6679 return BB; 6680 } 6681 case ARM::STRr_preidx: 6682 case ARM::STRBr_preidx: 6683 case ARM::STRH_preidx: { 6684 unsigned NewOpc; 6685 switch (MI->getOpcode()) { 6686 default: llvm_unreachable("unexpected opcode!"); 6687 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 6688 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 6689 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 6690 } 6691 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 6692 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 6693 MIB.addOperand(MI->getOperand(i)); 6694 MI->eraseFromParent(); 6695 return BB; 6696 } 6697 case ARM::ATOMIC_LOAD_ADD_I8: 6698 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6699 case ARM::ATOMIC_LOAD_ADD_I16: 6700 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6701 case ARM::ATOMIC_LOAD_ADD_I32: 6702 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6703 6704 case ARM::ATOMIC_LOAD_AND_I8: 6705 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6706 case ARM::ATOMIC_LOAD_AND_I16: 6707 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6708 case ARM::ATOMIC_LOAD_AND_I32: 6709 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6710 6711 case ARM::ATOMIC_LOAD_OR_I8: 6712 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6713 case ARM::ATOMIC_LOAD_OR_I16: 6714 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6715 case ARM::ATOMIC_LOAD_OR_I32: 6716 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6717 6718 case ARM::ATOMIC_LOAD_XOR_I8: 6719 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6720 case ARM::ATOMIC_LOAD_XOR_I16: 6721 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6722 case ARM::ATOMIC_LOAD_XOR_I32: 6723 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6724 6725 case ARM::ATOMIC_LOAD_NAND_I8: 6726 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6727 case ARM::ATOMIC_LOAD_NAND_I16: 6728 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6729 case ARM::ATOMIC_LOAD_NAND_I32: 6730 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6731 6732 case ARM::ATOMIC_LOAD_SUB_I8: 6733 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 6734 case ARM::ATOMIC_LOAD_SUB_I16: 6735 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6736 case ARM::ATOMIC_LOAD_SUB_I32: 6737 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6738 6739 case ARM::ATOMIC_LOAD_MIN_I8: 6740 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 6741 case ARM::ATOMIC_LOAD_MIN_I16: 6742 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 6743 case ARM::ATOMIC_LOAD_MIN_I32: 6744 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 6745 6746 case ARM::ATOMIC_LOAD_MAX_I8: 6747 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 6748 case ARM::ATOMIC_LOAD_MAX_I16: 6749 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 6750 case ARM::ATOMIC_LOAD_MAX_I32: 6751 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 6752 6753 case ARM::ATOMIC_LOAD_UMIN_I8: 6754 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 6755 case ARM::ATOMIC_LOAD_UMIN_I16: 6756 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 6757 case ARM::ATOMIC_LOAD_UMIN_I32: 6758 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 6759 6760 case ARM::ATOMIC_LOAD_UMAX_I8: 6761 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 6762 case ARM::ATOMIC_LOAD_UMAX_I16: 6763 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 6764 case ARM::ATOMIC_LOAD_UMAX_I32: 6765 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 6766 6767 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 6768 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 6769 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 6770 6771 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 6772 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 6773 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 6774 6775 6776 case ARM::ATOMADD6432: 6777 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 6778 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 6779 /*NeedsCarry*/ true); 6780 case ARM::ATOMSUB6432: 6781 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6782 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6783 /*NeedsCarry*/ true); 6784 case ARM::ATOMOR6432: 6785 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 6786 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6787 case ARM::ATOMXOR6432: 6788 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 6789 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6790 case ARM::ATOMAND6432: 6791 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 6792 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6793 case ARM::ATOMSWAP6432: 6794 return EmitAtomicBinary64(MI, BB, 0, 0, false); 6795 case ARM::ATOMCMPXCHG6432: 6796 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6797 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6798 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6799 6800 case ARM::tMOVCCr_pseudo: { 6801 // To "insert" a SELECT_CC instruction, we actually have to insert the 6802 // diamond control-flow pattern. The incoming instruction knows the 6803 // destination vreg to set, the condition code register to branch on, the 6804 // true/false values to select between, and a branch opcode to use. 6805 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6806 MachineFunction::iterator It = BB; 6807 ++It; 6808 6809 // thisMBB: 6810 // ... 6811 // TrueVal = ... 
6812 // cmpTY ccX, r1, r2 6813 // bCC copy1MBB 6814 // fallthrough --> copy0MBB 6815 MachineBasicBlock *thisMBB = BB; 6816 MachineFunction *F = BB->getParent(); 6817 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6818 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6819 F->insert(It, copy0MBB); 6820 F->insert(It, sinkMBB); 6821 6822 // Transfer the remainder of BB and its successor edges to sinkMBB. 6823 sinkMBB->splice(sinkMBB->begin(), BB, 6824 llvm::next(MachineBasicBlock::iterator(MI)), 6825 BB->end()); 6826 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6827 6828 BB->addSuccessor(copy0MBB); 6829 BB->addSuccessor(sinkMBB); 6830 6831 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6832 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6833 6834 // copy0MBB: 6835 // %FalseValue = ... 6836 // # fallthrough to sinkMBB 6837 BB = copy0MBB; 6838 6839 // Update machine-CFG edges 6840 BB->addSuccessor(sinkMBB); 6841 6842 // sinkMBB: 6843 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6844 // ... 6845 BB = sinkMBB; 6846 BuildMI(*BB, BB->begin(), dl, 6847 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 6848 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6849 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6850 6851 MI->eraseFromParent(); // The pseudo instruction is gone now. 6852 return BB; 6853 } 6854 6855 case ARM::BCCi64: 6856 case ARM::BCCZi64: { 6857 // If there is an unconditional branch to the other successor, remove it. 6858 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 6859 6860 // Compare both parts that make up the double comparison separately for 6861 // equality. 6862 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 6863 6864 unsigned LHS1 = MI->getOperand(1).getReg(); 6865 unsigned LHS2 = MI->getOperand(2).getReg(); 6866 if (RHSisZero) { 6867 AddDefaultPred(BuildMI(BB, dl, 6868 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6869 .addReg(LHS1).addImm(0)); 6870 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6871 .addReg(LHS2).addImm(0) 6872 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6873 } else { 6874 unsigned RHS1 = MI->getOperand(3).getReg(); 6875 unsigned RHS2 = MI->getOperand(4).getReg(); 6876 AddDefaultPred(BuildMI(BB, dl, 6877 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6878 .addReg(LHS1).addReg(RHS1)); 6879 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6880 .addReg(LHS2).addReg(RHS2) 6881 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6882 } 6883 6884 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 6885 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 6886 if (MI->getOperand(0).getImm() == ARMCC::NE) 6887 std::swap(destMBB, exitMBB); 6888 6889 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6890 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 6891 if (isThumb2) 6892 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 6893 else 6894 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 6895 6896 MI->eraseFromParent(); // The pseudo instruction is gone now. 6897 return BB; 6898 } 6899 6900 case ARM::Int_eh_sjlj_setjmp: 6901 case ARM::Int_eh_sjlj_setjmp_nofp: 6902 case ARM::tInt_eh_sjlj_setjmp: 6903 case ARM::t2Int_eh_sjlj_setjmp: 6904 case ARM::t2Int_eh_sjlj_setjmp_nofp: 6905 EmitSjLjDispatchBlock(MI, BB); 6906 return BB; 6907 6908 case ARM::ABS: 6909 case ARM::t2ABS: { 6910 // To insert an ABS instruction, we have to insert the 6911 // diamond control-flow pattern. 
The incoming instruction knows the 6912 // source vreg to test against 0, the destination vreg to set, 6913 // the condition code register to branch on, the 6914 // true/false values to select between, and a branch opcode to use. 6915 // It transforms 6916 // V1 = ABS V0 6917 // into 6918 // V2 = MOVS V0 6919 // BCC (branch to SinkBB if V0 >= 0) 6920 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 6921 // SinkBB: V1 = PHI(V2, V3) 6922 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6923 MachineFunction::iterator BBI = BB; 6924 ++BBI; 6925 MachineFunction *Fn = BB->getParent(); 6926 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6927 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6928 Fn->insert(BBI, RSBBB); 6929 Fn->insert(BBI, SinkBB); 6930 6931 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 6932 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 6933 bool isThumb2 = Subtarget->isThumb2(); 6934 MachineRegisterInfo &MRI = Fn->getRegInfo(); 6935 // In Thumb mode S must not be specified if source register is the SP or 6936 // PC and if destination register is the SP, so restrict register class 6937 unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ? 6938 (const TargetRegisterClass*)&ARM::rGPRRegClass : 6939 (const TargetRegisterClass*)&ARM::GPRRegClass); 6940 6941 // Transfer the remainder of BB and its successor edges to sinkMBB. 6942 SinkBB->splice(SinkBB->begin(), BB, 6943 llvm::next(MachineBasicBlock::iterator(MI)), 6944 BB->end()); 6945 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 6946 6947 BB->addSuccessor(RSBBB); 6948 BB->addSuccessor(SinkBB); 6949 6950 // fall through to SinkMBB 6951 RSBBB->addSuccessor(SinkBB); 6952 6953 // insert a cmp at the end of BB 6954 AddDefaultPred(BuildMI(BB, dl, 6955 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6956 .addReg(ABSSrcReg).addImm(0)); 6957 6958 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 6959 BuildMI(BB, dl, 6960 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 6961 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 6962 6963 // insert rsbri in RSBBB 6964 // Note: BCC and rsbri will be converted into predicated rsbmi 6965 // by if-conversion pass 6966 BuildMI(*RSBBB, RSBBB->begin(), dl, 6967 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 6968 .addReg(ABSSrcReg, RegState::Kill) 6969 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 6970 6971 // insert PHI in SinkBB, 6972 // reuse ABSDstReg to not change uses of ABS instruction 6973 BuildMI(*SinkBB, SinkBB->begin(), dl, 6974 TII->get(ARM::PHI), ABSDstReg) 6975 .addReg(NewRsbDstReg).addMBB(RSBBB) 6976 .addReg(ABSSrcReg).addMBB(BB); 6977 6978 // remove ABS instruction 6979 MI->eraseFromParent(); 6980 6981 // return last added BB 6982 return SinkBB; 6983 } 6984 case ARM::COPY_STRUCT_BYVAL_I32: 6985 ++NumLoopByVals; 6986 return EmitStructByval(MI, BB); 6987 } 6988} 6989 6990void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 6991 SDNode *Node) const { 6992 if (!MI->hasPostISelHook()) { 6993 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 6994 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 6995 return; 6996 } 6997 6998 const MCInstrDesc *MCID = &MI->getDesc(); 6999 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 7000 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 7001 // operand is still set to noreg. 
If needed, set the optional operand's 7002 // register to CPSR, and remove the redundant implicit def. 7003 // 7004 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 7005 7006 // Rename pseudo opcodes. 7007 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 7008 if (NewOpc) { 7009 const ARMBaseInstrInfo *TII = 7010 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 7011 MCID = &TII->get(NewOpc); 7012 7013 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 7014 "converted opcode should be the same except for cc_out"); 7015 7016 MI->setDesc(*MCID); 7017 7018 // Add the optional cc_out operand 7019 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 7020 } 7021 unsigned ccOutIdx = MCID->getNumOperands() - 1; 7022 7023 // Any ARM instruction that sets the 's' bit should specify an optional 7024 // "cc_out" operand in the last operand position. 7025 if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 7026 assert(!NewOpc && "Optional cc_out operand required"); 7027 return; 7028 } 7029 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 7030 // since we already have an optional CPSR def. 7031 bool definesCPSR = false; 7032 bool deadCPSR = false; 7033 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 7034 i != e; ++i) { 7035 const MachineOperand &MO = MI->getOperand(i); 7036 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 7037 definesCPSR = true; 7038 if (MO.isDead()) 7039 deadCPSR = true; 7040 MI->RemoveOperand(i); 7041 break; 7042 } 7043 } 7044 if (!definesCPSR) { 7045 assert(!NewOpc && "Optional cc_out operand required"); 7046 return; 7047 } 7048 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 7049 if (deadCPSR) { 7050 assert(!MI->getOperand(ccOutIdx).getReg() && 7051 "expect uninitialized optional cc_out operand"); 7052 return; 7053 } 7054 7055 // If this instruction was defined with an optional CPSR def and its dag node 7056 // had a live implicit CPSR def, then activate the optional CPSR def. 7057 MachineOperand &MO = MI->getOperand(ccOutIdx); 7058 MO.setReg(ARM::CPSR); 7059 MO.setIsDef(true); 7060} 7061 7062//===----------------------------------------------------------------------===// 7063// ARM Optimization Hooks 7064//===----------------------------------------------------------------------===// 7065 7066// Helper function that checks if N is a null or all ones constant. 7067static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 7068 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N); 7069 if (!C) 7070 return false; 7071 return AllOnes ? C->isAllOnesValue() : C->isNullValue(); 7072} 7073 7074// Return true if N is conditionally 0 or all ones. 7075// Detects these expressions where cc is an i1 value: 7076// 7077// (select cc 0, y) [AllOnes=0] 7078// (select cc y, 0) [AllOnes=0] 7079// (zext cc) [AllOnes=0] 7080// (sext cc) [AllOnes=0/1] 7081// (select cc -1, y) [AllOnes=1] 7082// (select cc y, -1) [AllOnes=1] 7083// 7084// Invert is set when N is the null/all ones constant when CC is false. 7085// OtherOp is set to the alternative value of N. 
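// For example (illustrative only), with AllOnes=0:
//   N = (select %cc, 0, %y)  -->  CC=%cc, Invert=false, OtherOp=%y
//   N = (select %cc, %y, 0)  -->  CC=%cc, Invert=true,  OtherOp=%y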
7086static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 7087 SDValue &CC, bool &Invert, 7088 SDValue &OtherOp, 7089 SelectionDAG &DAG) { 7090 switch (N->getOpcode()) { 7091 default: return false; 7092 case ISD::SELECT: { 7093 CC = N->getOperand(0); 7094 SDValue N1 = N->getOperand(1); 7095 SDValue N2 = N->getOperand(2); 7096 if (isZeroOrAllOnes(N1, AllOnes)) { 7097 Invert = false; 7098 OtherOp = N2; 7099 return true; 7100 } 7101 if (isZeroOrAllOnes(N2, AllOnes)) { 7102 Invert = true; 7103 OtherOp = N1; 7104 return true; 7105 } 7106 return false; 7107 } 7108 case ISD::ZERO_EXTEND: 7109 // (zext cc) can never be the all ones value. 7110 if (AllOnes) 7111 return false; 7112 // Fall through. 7113 case ISD::SIGN_EXTEND: { 7114 EVT VT = N->getValueType(0); 7115 CC = N->getOperand(0); 7116 if (CC.getValueType() != MVT::i1) 7117 return false; 7118 Invert = !AllOnes; 7119 if (AllOnes) 7120 // When looking for an AllOnes constant, N is an sext, and the 'other' 7121 // value is 0. 7122 OtherOp = DAG.getConstant(0, VT); 7123 else if (N->getOpcode() == ISD::ZERO_EXTEND) 7124 // When looking for a 0 constant, N can be zext or sext. 7125 OtherOp = DAG.getConstant(1, VT); 7126 else 7127 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); 7128 return true; 7129 } 7130 } 7131} 7132 7133// Combine a constant select operand into its use: 7134// 7135// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7136// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 7137// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 7138// (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 7139// (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 7140// 7141// The transform is rejected if the select doesn't have a constant operand that 7142// is null, or all ones when AllOnes is set. 7143// 7144// Also recognize sext/zext from i1: 7145// 7146// (add (zext cc), x) -> (select cc (add x, 1), x) 7147// (add (sext cc), x) -> (select cc (add x, -1), x) 7148// 7149// These transformations eventually create predicated instructions. 7150// 7151// @param N The node to transform. 7152// @param Slct The N operand that is a select. 7153// @param OtherOp The other N operand (x above). 7154// @param DCI Context. 7155// @param AllOnes Require the select constant to be all ones instead of null. 7156// @returns The new node, or SDValue() on failure. 7157static 7158SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 7159 TargetLowering::DAGCombinerInfo &DCI, 7160 bool AllOnes = false) { 7161 SelectionDAG &DAG = DCI.DAG; 7162 EVT VT = N->getValueType(0); 7163 SDValue NonConstantVal; 7164 SDValue CCOp; 7165 bool SwapSelectOps; 7166 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 7167 NonConstantVal, DAG)) 7168 return SDValue(); 7169 7170 // Slct is now know to be the desired identity constant when CC is true. 7171 SDValue TrueVal = OtherOp; 7172 SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, 7173 OtherOp, NonConstantVal); 7174 // Unless SwapSelectOps says CC should be false. 7175 if (SwapSelectOps) 7176 std::swap(TrueVal, FalseVal); 7177 7178 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 7179 CCOp, TrueVal, FalseVal); 7180} 7181 7182// Attempt combineSelectAndUse on each operand of a commutative operator N. 
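// Each operand is only tried as the select if it has a single use: first N0
// with N1 as the other value, then the commuted pair.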
7183static 7184SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, 7185 TargetLowering::DAGCombinerInfo &DCI) { 7186 SDValue N0 = N->getOperand(0); 7187 SDValue N1 = N->getOperand(1); 7188 if (N0.getNode()->hasOneUse()) { 7189 SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes); 7190 if (Result.getNode()) 7191 return Result; 7192 } 7193 if (N1.getNode()->hasOneUse()) { 7194 SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes); 7195 if (Result.getNode()) 7196 return Result; 7197 } 7198 return SDValue(); 7199} 7200 7201// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 7202// (only after legalization). 7203static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 7204 TargetLowering::DAGCombinerInfo &DCI, 7205 const ARMSubtarget *Subtarget) { 7206 7207 // Only perform optimization if after legalize, and if NEON is available. We 7208 // also expected both operands to be BUILD_VECTORs. 7209 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 7210 || N0.getOpcode() != ISD::BUILD_VECTOR 7211 || N1.getOpcode() != ISD::BUILD_VECTOR) 7212 return SDValue(); 7213 7214 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 7215 EVT VT = N->getValueType(0); 7216 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 7217 return SDValue(); 7218 7219 // Check that the vector operands are of the right form. 7220 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 7221 // operands, where N is the size of the formed vector. 7222 // Each EXTRACT_VECTOR should have the same input vector and odd or even 7223 // index such that we have a pair wise add pattern. 7224 7225 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 7226 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7227 return SDValue(); 7228 SDValue Vec = N0->getOperand(0)->getOperand(0); 7229 SDNode *V = Vec.getNode(); 7230 unsigned nextIndex = 0; 7231 7232 // For each operands to the ADD which are BUILD_VECTORs, 7233 // check to see if each of their operands are an EXTRACT_VECTOR with 7234 // the same vector and appropriate index. 7235 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 7236 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 7237 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7238 7239 SDValue ExtVec0 = N0->getOperand(i); 7240 SDValue ExtVec1 = N1->getOperand(i); 7241 7242 // First operand is the vector, verify its the same. 7243 if (V != ExtVec0->getOperand(0).getNode() || 7244 V != ExtVec1->getOperand(0).getNode()) 7245 return SDValue(); 7246 7247 // Second is the constant, verify its correct. 7248 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 7249 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 7250 7251 // For the constant, we want to see all the even or all the odd. 7252 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 7253 || C1->getZExtValue() != nextIndex+1) 7254 return SDValue(); 7255 7256 // Increment index. 7257 nextIndex+=2; 7258 } else 7259 return SDValue(); 7260 } 7261 7262 // Create VPADDL node. 7263 SelectionDAG &DAG = DCI.DAG; 7264 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7265 7266 // Build operand list. 7267 SmallVector<SDValue, 8> Ops; 7268 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 7269 TLI.getPointerTy())); 7270 7271 // Input is the vector. 7272 Ops.push_back(Vec); 7273 7274 // Get widened type and narrowed type. 
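  // The vpaddl intrinsic is built with an element type twice as wide as VT's
  // (same element count), and its result is truncated back to VT below.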
7275 MVT widenType; 7276 unsigned numElem = VT.getVectorNumElements(); 7277 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 7278 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 7279 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 7280 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 7281 default: 7282 llvm_unreachable("Invalid vector element type for padd optimization."); 7283 } 7284 7285 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7286 widenType, &Ops[0], Ops.size()); 7287 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 7288} 7289 7290static SDValue findMUL_LOHI(SDValue V) { 7291 if (V->getOpcode() == ISD::UMUL_LOHI || 7292 V->getOpcode() == ISD::SMUL_LOHI) 7293 return V; 7294 return SDValue(); 7295} 7296 7297static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, 7298 TargetLowering::DAGCombinerInfo &DCI, 7299 const ARMSubtarget *Subtarget) { 7300 7301 if (Subtarget->isThumb1Only()) return SDValue(); 7302 7303 // Only perform the checks after legalize when the pattern is available. 7304 if (DCI.isBeforeLegalize()) return SDValue(); 7305 7306 // Look for multiply add opportunities. 7307 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where 7308 // each add nodes consumes a value from ISD::UMUL_LOHI and there is 7309 // a glue link from the first add to the second add. 7310 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by 7311 // a S/UMLAL instruction. 7312 // loAdd UMUL_LOHI 7313 // \ / :lo \ :hi 7314 // \ / \ [no multiline comment] 7315 // ADDC | hiAdd 7316 // \ :glue / / 7317 // \ / / 7318 // ADDE 7319 // 7320 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC"); 7321 SDValue AddcOp0 = AddcNode->getOperand(0); 7322 SDValue AddcOp1 = AddcNode->getOperand(1); 7323 7324 // Check if the two operands are from the same mul_lohi node. 7325 if (AddcOp0.getNode() == AddcOp1.getNode()) 7326 return SDValue(); 7327 7328 assert(AddcNode->getNumValues() == 2 && 7329 AddcNode->getValueType(0) == MVT::i32 && 7330 AddcNode->getValueType(1) == MVT::Glue && 7331 "Expect ADDC with two result values: i32, glue"); 7332 7333 // Check that the ADDC adds the low result of the S/UMUL_LOHI. 7334 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI && 7335 AddcOp0->getOpcode() != ISD::SMUL_LOHI && 7336 AddcOp1->getOpcode() != ISD::UMUL_LOHI && 7337 AddcOp1->getOpcode() != ISD::SMUL_LOHI) 7338 return SDValue(); 7339 7340 // Look for the glued ADDE. 7341 SDNode* AddeNode = AddcNode->getGluedUser(); 7342 if (AddeNode == NULL) 7343 return SDValue(); 7344 7345 // Make sure it is really an ADDE. 7346 if (AddeNode->getOpcode() != ISD::ADDE) 7347 return SDValue(); 7348 7349 assert(AddeNode->getNumOperands() == 3 && 7350 AddeNode->getOperand(2).getValueType() == MVT::Glue && 7351 "ADDE node has the wrong inputs"); 7352 7353 // Check for the triangle shape. 7354 SDValue AddeOp0 = AddeNode->getOperand(0); 7355 SDValue AddeOp1 = AddeNode->getOperand(1); 7356 7357 // Make sure that the ADDE operands are not coming from the same node. 7358 if (AddeOp0.getNode() == AddeOp1.getNode()) 7359 return SDValue(); 7360 7361 // Find the MUL_LOHI node walking up ADDE's operands. 7362 bool IsLeftOperandMUL = false; 7363 SDValue MULOp = findMUL_LOHI(AddeOp0); 7364 if (MULOp == SDValue()) 7365 MULOp = findMUL_LOHI(AddeOp1); 7366 else 7367 IsLeftOperandMUL = true; 7368 if (MULOp == SDValue()) 7369 return SDValue(); 7370 7371 // Figure out the right opcode. 
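  // SMUL_LOHI becomes SMLAL; otherwise the node found by findMUL_LOHI is a
  // UMUL_LOHI (the only other opcode it accepts) and becomes UMLAL.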
7372 unsigned Opc = MULOp->getOpcode(); 7373 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 7374 7375 // Figure out the high and low input values to the MLAL node. 7376 SDValue* HiMul = &MULOp; 7377 SDValue* HiAdd = NULL; 7378 SDValue* LoMul = NULL; 7379 SDValue* LowAdd = NULL; 7380 7381 if (IsLeftOperandMUL) 7382 HiAdd = &AddeOp1; 7383 else 7384 HiAdd = &AddeOp0; 7385 7386 7387 if (AddcOp0->getOpcode() == Opc) { 7388 LoMul = &AddcOp0; 7389 LowAdd = &AddcOp1; 7390 } 7391 if (AddcOp1->getOpcode() == Opc) { 7392 LoMul = &AddcOp1; 7393 LowAdd = &AddcOp0; 7394 } 7395 7396 if (LoMul == NULL) 7397 return SDValue(); 7398 7399 if (LoMul->getNode() != HiMul->getNode()) 7400 return SDValue(); 7401 7402 // Create the merged node. 7403 SelectionDAG &DAG = DCI.DAG; 7404 7405 // Build operand list. 7406 SmallVector<SDValue, 8> Ops; 7407 Ops.push_back(LoMul->getOperand(0)); 7408 Ops.push_back(LoMul->getOperand(1)); 7409 Ops.push_back(*LowAdd); 7410 Ops.push_back(*HiAdd); 7411 7412 SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(), 7413 DAG.getVTList(MVT::i32, MVT::i32), 7414 &Ops[0], Ops.size()); 7415 7416 // Replace the ADDs' nodes uses by the MLA node's values. 7417 SDValue HiMLALResult(MLALNode.getNode(), 1); 7418 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 7419 7420 SDValue LoMLALResult(MLALNode.getNode(), 0); 7421 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 7422 7423 // Return original node to notify the driver to stop replacing. 7424 SDValue resNode(AddcNode, 0); 7425 return resNode; 7426} 7427 7428/// PerformADDCCombine - Target-specific dag combine transform from 7429/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL. 7430static SDValue PerformADDCCombine(SDNode *N, 7431 TargetLowering::DAGCombinerInfo &DCI, 7432 const ARMSubtarget *Subtarget) { 7433 7434 return AddCombineTo64bitMLAL(N, DCI, Subtarget); 7435 7436} 7437 7438/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 7439/// operands N0 and N1. This is a helper for PerformADDCombine that is 7440/// called with the default operands, and if that fails, with commuted 7441/// operands. 7442static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 7443 TargetLowering::DAGCombinerInfo &DCI, 7444 const ARMSubtarget *Subtarget){ 7445 7446 // Attempt to create vpaddl for this add. 7447 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 7448 if (Result.getNode()) 7449 return Result; 7450 7451 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7452 if (N0.getNode()->hasOneUse()) { 7453 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 7454 if (Result.getNode()) return Result; 7455 } 7456 return SDValue(); 7457} 7458 7459/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 7460/// 7461static SDValue PerformADDCombine(SDNode *N, 7462 TargetLowering::DAGCombinerInfo &DCI, 7463 const ARMSubtarget *Subtarget) { 7464 SDValue N0 = N->getOperand(0); 7465 SDValue N1 = N->getOperand(1); 7466 7467 // First try with the default operand order. 7468 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 7469 if (Result.getNode()) 7470 return Result; 7471 7472 // If that didn't work, try again with the operands commuted. 7473 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 7474} 7475 7476/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 
7477/// 7478static SDValue PerformSUBCombine(SDNode *N, 7479 TargetLowering::DAGCombinerInfo &DCI) { 7480 SDValue N0 = N->getOperand(0); 7481 SDValue N1 = N->getOperand(1); 7482 7483 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 7484 if (N1.getNode()->hasOneUse()) { 7485 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 7486 if (Result.getNode()) return Result; 7487 } 7488 7489 return SDValue(); 7490} 7491 7492/// PerformVMULCombine 7493/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 7494/// special multiplier accumulator forwarding. 7495/// vmul d3, d0, d2 7496/// vmla d3, d1, d2 7497/// is faster than 7498/// vadd d3, d0, d1 7499/// vmul d3, d3, d2 7500static SDValue PerformVMULCombine(SDNode *N, 7501 TargetLowering::DAGCombinerInfo &DCI, 7502 const ARMSubtarget *Subtarget) { 7503 if (!Subtarget->hasVMLxForwarding()) 7504 return SDValue(); 7505 7506 SelectionDAG &DAG = DCI.DAG; 7507 SDValue N0 = N->getOperand(0); 7508 SDValue N1 = N->getOperand(1); 7509 unsigned Opcode = N0.getOpcode(); 7510 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 7511 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 7512 Opcode = N1.getOpcode(); 7513 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 7514 Opcode != ISD::FADD && Opcode != ISD::FSUB) 7515 return SDValue(); 7516 std::swap(N0, N1); 7517 } 7518 7519 EVT VT = N->getValueType(0); 7520 DebugLoc DL = N->getDebugLoc(); 7521 SDValue N00 = N0->getOperand(0); 7522 SDValue N01 = N0->getOperand(1); 7523 return DAG.getNode(Opcode, DL, VT, 7524 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 7525 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 7526} 7527 7528static SDValue PerformMULCombine(SDNode *N, 7529 TargetLowering::DAGCombinerInfo &DCI, 7530 const ARMSubtarget *Subtarget) { 7531 SelectionDAG &DAG = DCI.DAG; 7532 7533 if (Subtarget->isThumb1Only()) 7534 return SDValue(); 7535 7536 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 7537 return SDValue(); 7538 7539 EVT VT = N->getValueType(0); 7540 if (VT.is64BitVector() || VT.is128BitVector()) 7541 return PerformVMULCombine(N, DCI, Subtarget); 7542 if (VT != MVT::i32) 7543 return SDValue(); 7544 7545 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 7546 if (!C) 7547 return SDValue(); 7548 7549 int64_t MulAmt = C->getSExtValue(); 7550 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 7551 7552 ShiftAmt = ShiftAmt & (32 - 1); 7553 SDValue V = N->getOperand(0); 7554 DebugLoc DL = N->getDebugLoc(); 7555 7556 SDValue Res; 7557 MulAmt >>= ShiftAmt; 7558 7559 if (MulAmt >= 0) { 7560 if (isPowerOf2_32(MulAmt - 1)) { 7561 // (mul x, 2^N + 1) => (add (shl x, N), x) 7562 Res = DAG.getNode(ISD::ADD, DL, VT, 7563 V, 7564 DAG.getNode(ISD::SHL, DL, VT, 7565 V, 7566 DAG.getConstant(Log2_32(MulAmt - 1), 7567 MVT::i32))); 7568 } else if (isPowerOf2_32(MulAmt + 1)) { 7569 // (mul x, 2^N - 1) => (sub (shl x, N), x) 7570 Res = DAG.getNode(ISD::SUB, DL, VT, 7571 DAG.getNode(ISD::SHL, DL, VT, 7572 V, 7573 DAG.getConstant(Log2_32(MulAmt + 1), 7574 MVT::i32)), 7575 V); 7576 } else 7577 return SDValue(); 7578 } else { 7579 uint64_t MulAmtAbs = -MulAmt; 7580 if (isPowerOf2_32(MulAmtAbs + 1)) { 7581 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 7582 Res = DAG.getNode(ISD::SUB, DL, VT, 7583 V, 7584 DAG.getNode(ISD::SHL, DL, VT, 7585 V, 7586 DAG.getConstant(Log2_32(MulAmtAbs + 1), 7587 MVT::i32))); 7588 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 7589 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 7590 Res = DAG.getNode(ISD::ADD, DL, VT, 7591 V, 7592 DAG.getNode(ISD::SHL, DL, VT, 7593 V, 
7594 DAG.getConstant(Log2_32(MulAmtAbs-1), 7595 MVT::i32))); 7596 Res = DAG.getNode(ISD::SUB, DL, VT, 7597 DAG.getConstant(0, MVT::i32),Res); 7598 7599 } else 7600 return SDValue(); 7601 } 7602 7603 if (ShiftAmt != 0) 7604 Res = DAG.getNode(ISD::SHL, DL, VT, 7605 Res, DAG.getConstant(ShiftAmt, MVT::i32)); 7606 7607 // Do not add new nodes to DAG combiner worklist. 7608 DCI.CombineTo(N, Res, false); 7609 return SDValue(); 7610} 7611 7612static SDValue PerformANDCombine(SDNode *N, 7613 TargetLowering::DAGCombinerInfo &DCI, 7614 const ARMSubtarget *Subtarget) { 7615 7616 // Attempt to use immediate-form VBIC 7617 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 7618 DebugLoc dl = N->getDebugLoc(); 7619 EVT VT = N->getValueType(0); 7620 SelectionDAG &DAG = DCI.DAG; 7621 7622 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 7623 return SDValue(); 7624 7625 APInt SplatBits, SplatUndef; 7626 unsigned SplatBitSize; 7627 bool HasAnyUndefs; 7628 if (BVN && 7629 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 7630 if (SplatBitSize <= 64) { 7631 EVT VbicVT; 7632 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 7633 SplatUndef.getZExtValue(), SplatBitSize, 7634 DAG, VbicVT, VT.is128BitVector(), 7635 OtherModImm); 7636 if (Val.getNode()) { 7637 SDValue Input = 7638 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 7639 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 7640 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 7641 } 7642 } 7643 } 7644 7645 if (!Subtarget->isThumb1Only()) { 7646 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 7647 SDValue Result = combineSelectAndUseCommutative(N, true, DCI); 7648 if (Result.getNode()) 7649 return Result; 7650 } 7651 7652 return SDValue(); 7653} 7654 7655/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 7656static SDValue PerformORCombine(SDNode *N, 7657 TargetLowering::DAGCombinerInfo &DCI, 7658 const ARMSubtarget *Subtarget) { 7659 // Attempt to use immediate-form VORR 7660 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 7661 DebugLoc dl = N->getDebugLoc(); 7662 EVT VT = N->getValueType(0); 7663 SelectionDAG &DAG = DCI.DAG; 7664 7665 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 7666 return SDValue(); 7667 7668 APInt SplatBits, SplatUndef; 7669 unsigned SplatBitSize; 7670 bool HasAnyUndefs; 7671 if (BVN && Subtarget->hasNEON() && 7672 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 7673 if (SplatBitSize <= 64) { 7674 EVT VorrVT; 7675 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 7676 SplatUndef.getZExtValue(), SplatBitSize, 7677 DAG, VorrVT, VT.is128BitVector(), 7678 OtherModImm); 7679 if (Val.getNode()) { 7680 SDValue Input = 7681 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 7682 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 7683 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 7684 } 7685 } 7686 } 7687 7688 if (!Subtarget->isThumb1Only()) { 7689 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 7690 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 7691 if (Result.getNode()) 7692 return Result; 7693 } 7694 7695 // The code below optimizes (or (and X, Y), Z). 7696 // The AND operand needs to have a single user to make these optimizations 7697 // profitable. 
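  // Illustrative note on the first pattern handled below:
  //   (or (and B, A), (and C, ~A)) with A a constant splat
  // is a bitwise select (each result bit comes from B where A is 1 and from C
  // where A is 0), which is exactly what the NEON vbsl instruction computes
  // with A as the select mask.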
7698 SDValue N0 = N->getOperand(0); 7699 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 7700 return SDValue(); 7701 SDValue N1 = N->getOperand(1); 7702 7703 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 7704 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 7705 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 7706 APInt SplatUndef; 7707 unsigned SplatBitSize; 7708 bool HasAnyUndefs; 7709 7710 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 7711 APInt SplatBits0; 7712 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 7713 HasAnyUndefs) && !HasAnyUndefs) { 7714 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 7715 APInt SplatBits1; 7716 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 7717 HasAnyUndefs) && !HasAnyUndefs && 7718 SplatBits0 == ~SplatBits1) { 7719 // Canonicalize the vector type to make instruction selection simpler. 7720 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 7721 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 7722 N0->getOperand(1), N0->getOperand(0), 7723 N1->getOperand(0)); 7724 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 7725 } 7726 } 7727 } 7728 7729 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 7730 // reasonable. 7731 7732 // BFI is only available on V6T2+ 7733 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 7734 return SDValue(); 7735 7736 DebugLoc DL = N->getDebugLoc(); 7737 // 1) or (and A, mask), val => ARMbfi A, val, mask 7738 // iff (val & mask) == val 7739 // 7740 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 7741 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 7742 // && mask == ~mask2 7743 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 7744 // && ~mask == mask2 7745 // (i.e., copy a bitfield value into another bitfield of the same width) 7746 7747 if (VT != MVT::i32) 7748 return SDValue(); 7749 7750 SDValue N00 = N0.getOperand(0); 7751 7752 // The value and the mask need to be constants so we can verify this is 7753 // actually a bitfield set. If the mask is 0xffff, we can do better 7754 // via a movt instruction, so don't use BFI in that case. 7755 SDValue MaskOp = N0.getOperand(1); 7756 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 7757 if (!MaskC) 7758 return SDValue(); 7759 unsigned Mask = MaskC->getZExtValue(); 7760 if (Mask == 0xffff) 7761 return SDValue(); 7762 SDValue Res; 7763 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 7764 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 7765 if (N1C) { 7766 unsigned Val = N1C->getZExtValue(); 7767 if ((Val & ~Mask) != Val) 7768 return SDValue(); 7769 7770 if (ARM::isBitFieldInvertedMask(Mask)) { 7771 Val >>= CountTrailingZeros_32(~Mask); 7772 7773 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 7774 DAG.getConstant(Val, MVT::i32), 7775 DAG.getConstant(Mask, MVT::i32)); 7776 7777 // Do not add new nodes to DAG combiner worklist. 7778 DCI.CombineTo(N, Res, false); 7779 return SDValue(); 7780 } 7781 } else if (N1.getOpcode() == ISD::AND) { 7782 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 7783 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7784 if (!N11C) 7785 return SDValue(); 7786 unsigned Mask2 = N11C->getZExtValue(); 7787 7788 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 7789 // as is to match. 
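    // Illustrative example for case (2a), with hypothetical masks
    // Mask = 0xff00ffff and Mask2 = 0x00ff0000 (so Mask == ~Mask2):
    //   (or (and A, 0xff00ffff), (and B, 0x00ff0000))
    //     => (ARMbfi A, (srl B, 16), 0xff00ffff)
    // i.e. bits [23:16] of B are copied into bits [23:16] of A.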
7790 if (ARM::isBitFieldInvertedMask(Mask) && 7791 (Mask == ~Mask2)) { 7792 // The pack halfword instruction works better for masks that fit it, 7793 // so use that when it's available. 7794 if (Subtarget->hasT2ExtractPack() && 7795 (Mask == 0xffff || Mask == 0xffff0000)) 7796 return SDValue(); 7797 // 2a 7798 unsigned amt = CountTrailingZeros_32(Mask2); 7799 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 7800 DAG.getConstant(amt, MVT::i32)); 7801 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 7802 DAG.getConstant(Mask, MVT::i32)); 7803 // Do not add new nodes to DAG combiner worklist. 7804 DCI.CombineTo(N, Res, false); 7805 return SDValue(); 7806 } else if (ARM::isBitFieldInvertedMask(~Mask) && 7807 (~Mask == Mask2)) { 7808 // The pack halfword instruction works better for masks that fit it, 7809 // so use that when it's available. 7810 if (Subtarget->hasT2ExtractPack() && 7811 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 7812 return SDValue(); 7813 // 2b 7814 unsigned lsb = CountTrailingZeros_32(Mask); 7815 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 7816 DAG.getConstant(lsb, MVT::i32)); 7817 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 7818 DAG.getConstant(Mask2, MVT::i32)); 7819 // Do not add new nodes to DAG combiner worklist. 7820 DCI.CombineTo(N, Res, false); 7821 return SDValue(); 7822 } 7823 } 7824 7825 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 7826 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 7827 ARM::isBitFieldInvertedMask(~Mask)) { 7828 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 7829 // where lsb(mask) == #shamt and masked bits of B are known zero. 7830 SDValue ShAmt = N00.getOperand(1); 7831 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7832 unsigned LSB = CountTrailingZeros_32(Mask); 7833 if (ShAmtC != LSB) 7834 return SDValue(); 7835 7836 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 7837 DAG.getConstant(~Mask, MVT::i32)); 7838 7839 // Do not add new nodes to DAG combiner worklist. 7840 DCI.CombineTo(N, Res, false); 7841 } 7842 7843 return SDValue(); 7844} 7845 7846static SDValue PerformXORCombine(SDNode *N, 7847 TargetLowering::DAGCombinerInfo &DCI, 7848 const ARMSubtarget *Subtarget) { 7849 EVT VT = N->getValueType(0); 7850 SelectionDAG &DAG = DCI.DAG; 7851 7852 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 7853 return SDValue(); 7854 7855 if (!Subtarget->isThumb1Only()) { 7856 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 7857 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 7858 if (Result.getNode()) 7859 return Result; 7860 } 7861 7862 return SDValue(); 7863} 7864 7865/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 7866/// the bits being cleared by the AND are not demanded by the BFI. 
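/// For example (illustrative): (bfi A, (and B, 0xff), 0xffffff00) only demands
/// bits [7:0] of the inserted value, so the AND with 0xff is redundant and the
/// node can be rewritten as (bfi A, B, 0xffffff00).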
7867static SDValue PerformBFICombine(SDNode *N, 7868 TargetLowering::DAGCombinerInfo &DCI) { 7869 SDValue N1 = N->getOperand(1); 7870 if (N1.getOpcode() == ISD::AND) { 7871 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7872 if (!N11C) 7873 return SDValue(); 7874 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 7875 unsigned LSB = CountTrailingZeros_32(~InvMask); 7876 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 7877 unsigned Mask = (1 << Width)-1; 7878 unsigned Mask2 = N11C->getZExtValue(); 7879 if ((Mask & (~Mask2)) == 0) 7880 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 7881 N->getOperand(0), N1.getOperand(0), 7882 N->getOperand(2)); 7883 } 7884 return SDValue(); 7885} 7886 7887/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 7888/// ARMISD::VMOVRRD. 7889static SDValue PerformVMOVRRDCombine(SDNode *N, 7890 TargetLowering::DAGCombinerInfo &DCI) { 7891 // vmovrrd(vmovdrr x, y) -> x,y 7892 SDValue InDouble = N->getOperand(0); 7893 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 7894 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 7895 7896 // vmovrrd(load f64) -> (load i32), (load i32) 7897 SDNode *InNode = InDouble.getNode(); 7898 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 7899 InNode->getValueType(0) == MVT::f64 && 7900 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 7901 !cast<LoadSDNode>(InNode)->isVolatile()) { 7902 // TODO: Should this be done for non-FrameIndex operands? 7903 LoadSDNode *LD = cast<LoadSDNode>(InNode); 7904 7905 SelectionDAG &DAG = DCI.DAG; 7906 DebugLoc DL = LD->getDebugLoc(); 7907 SDValue BasePtr = LD->getBasePtr(); 7908 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 7909 LD->getPointerInfo(), LD->isVolatile(), 7910 LD->isNonTemporal(), LD->isInvariant(), 7911 LD->getAlignment()); 7912 7913 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7914 DAG.getConstant(4, MVT::i32)); 7915 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 7916 LD->getPointerInfo(), LD->isVolatile(), 7917 LD->isNonTemporal(), LD->isInvariant(), 7918 std::min(4U, LD->getAlignment() / 2)); 7919 7920 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 7921 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 7922 DCI.RemoveFromWorklist(LD); 7923 DAG.DeleteNode(LD); 7924 return Result; 7925 } 7926 7927 return SDValue(); 7928} 7929 7930/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 7931/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 7932static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 7933 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 7934 SDValue Op0 = N->getOperand(0); 7935 SDValue Op1 = N->getOperand(1); 7936 if (Op0.getOpcode() == ISD::BITCAST) 7937 Op0 = Op0.getOperand(0); 7938 if (Op1.getOpcode() == ISD::BITCAST) 7939 Op1 = Op1.getOperand(0); 7940 if (Op0.getOpcode() == ARMISD::VMOVRRD && 7941 Op0.getNode() == Op1.getNode() && 7942 Op0.getResNo() == 0 && Op1.getResNo() == 1) 7943 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 7944 N->getValueType(0), Op0.getOperand(0)); 7945 return SDValue(); 7946} 7947 7948/// PerformSTORECombine - Target-specific dag combine xforms for 7949/// ISD::STORE. 
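/// Illustrative sketch of the truncating-store path below: a truncating store
/// of <4 x i32> to <4 x i16> is bitcast to <8 x i16>, shuffled so that the four
/// live i16 values land in the low half of the register, and then written out
/// with a small number of wider integer stores of the packed data.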
7950static SDValue PerformSTORECombine(SDNode *N, 7951 TargetLowering::DAGCombinerInfo &DCI) { 7952 StoreSDNode *St = cast<StoreSDNode>(N); 7953 if (St->isVolatile()) 7954 return SDValue(); 7955 7956 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 7957 // pack all of the elements in one place. Next, store to memory in fewer 7958 // chunks. 7959 SDValue StVal = St->getValue(); 7960 EVT VT = StVal.getValueType(); 7961 if (St->isTruncatingStore() && VT.isVector()) { 7962 SelectionDAG &DAG = DCI.DAG; 7963 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7964 EVT StVT = St->getMemoryVT(); 7965 unsigned NumElems = VT.getVectorNumElements(); 7966 assert(StVT != VT && "Cannot truncate to the same type"); 7967 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits(); 7968 unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits(); 7969 7970 // From, To sizes and ElemCount must be pow of two 7971 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 7972 7973 // We are going to use the original vector elt for storing. 7974 // Accumulated smaller vector elements must be a multiple of the store size. 7975 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 7976 7977 unsigned SizeRatio = FromEltSz / ToEltSz; 7978 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 7979 7980 // Create a type on which we perform the shuffle. 7981 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 7982 NumElems*SizeRatio); 7983 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 7984 7985 DebugLoc DL = St->getDebugLoc(); 7986 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 7987 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 7988 for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio; 7989 7990 // Can't shuffle using an illegal type. 7991 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 7992 7993 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 7994 DAG.getUNDEF(WideVec.getValueType()), 7995 ShuffleVec.data()); 7996 // At this point all of the data is stored at the bottom of the 7997 // register. We now need to save it to mem. 7998 7999 // Find the largest store unit 8000 MVT StoreType = MVT::i8; 8001 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 8002 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 8003 MVT Tp = (MVT::SimpleValueType)tp; 8004 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 8005 StoreType = Tp; 8006 } 8007 // Didn't find a legal store type. 8008 if (!TLI.isTypeLegal(StoreType)) 8009 return SDValue(); 8010 8011 // Bitcast the original vector into a vector of store-size units 8012 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 8013 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 8014 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 8015 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 8016 SmallVector<SDValue, 8> Chains; 8017 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 8018 TLI.getPointerTy()); 8019 SDValue BasePtr = St->getBasePtr(); 8020 8021 // Perform one or more big stores into memory. 
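    // E.g. (illustrative) for a <4 x i32> value truncated to <4 x i16>, the
    // packed data is 64 bits, so with an i32 store unit this loop emits two
    // i32 stores at BasePtr and BasePtr+4.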
8022 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 8023 for (unsigned I = 0; I < E; I++) { 8024 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 8025 StoreType, ShuffWide, 8026 DAG.getIntPtrConstant(I)); 8027 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 8028 St->getPointerInfo(), St->isVolatile(), 8029 St->isNonTemporal(), St->getAlignment()); 8030 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 8031 Increment); 8032 Chains.push_back(Ch); 8033 } 8034 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0], 8035 Chains.size()); 8036 } 8037 8038 if (!ISD::isNormalStore(St)) 8039 return SDValue(); 8040 8041 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 8042 // ARM stores of arguments in the same cache line. 8043 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 8044 StVal.getNode()->hasOneUse()) { 8045 SelectionDAG &DAG = DCI.DAG; 8046 DebugLoc DL = St->getDebugLoc(); 8047 SDValue BasePtr = St->getBasePtr(); 8048 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 8049 StVal.getNode()->getOperand(0), BasePtr, 8050 St->getPointerInfo(), St->isVolatile(), 8051 St->isNonTemporal(), St->getAlignment()); 8052 8053 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 8054 DAG.getConstant(4, MVT::i32)); 8055 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 8056 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 8057 St->isNonTemporal(), 8058 std::min(4U, St->getAlignment() / 2)); 8059 } 8060 8061 if (StVal.getValueType() != MVT::i64 || 8062 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8063 return SDValue(); 8064 8065 // Bitcast an i64 store extracted from a vector to f64. 8066 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8067 SelectionDAG &DAG = DCI.DAG; 8068 DebugLoc dl = StVal.getDebugLoc(); 8069 SDValue IntVec = StVal.getOperand(0); 8070 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8071 IntVec.getValueType().getVectorNumElements()); 8072 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 8073 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8074 Vec, StVal.getOperand(1)); 8075 dl = N->getDebugLoc(); 8076 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 8077 // Make the DAGCombiner fold the bitcasts. 8078 DCI.AddToWorklist(Vec.getNode()); 8079 DCI.AddToWorklist(ExtElt.getNode()); 8080 DCI.AddToWorklist(V.getNode()); 8081 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 8082 St->getPointerInfo(), St->isVolatile(), 8083 St->isNonTemporal(), St->getAlignment(), 8084 St->getTBAAInfo()); 8085} 8086 8087/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 8088/// are normal, non-volatile loads. If so, it is profitable to bitcast an 8089/// i64 vector to have f64 elements, since the value can then be loaded 8090/// directly into a VFP register. 8091static bool hasNormalLoadOperand(SDNode *N) { 8092 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 8093 for (unsigned i = 0; i < NumElts; ++i) { 8094 SDNode *Elt = N->getOperand(i).getNode(); 8095 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 8096 return true; 8097 } 8098 return false; 8099} 8100 8101/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 8102/// ISD::BUILD_VECTOR. 
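/// Illustrative sketch: for a v2i64 build_vector whose elements are i64 loads,
/// each element is bitcast to f64 (the DAG combiner can then fold the bitcast
/// into an f64 load), a v2f64 build_vector is formed, and the result is bitcast
/// back to v2i64 so the loaded values can go straight into D registers.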
8103static SDValue PerformBUILD_VECTORCombine(SDNode *N, 8104 TargetLowering::DAGCombinerInfo &DCI){ 8105 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 8106 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 8107 // into a pair of GPRs, which is fine when the value is used as a scalar, 8108 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 8109 SelectionDAG &DAG = DCI.DAG; 8110 if (N->getNumOperands() == 2) { 8111 SDValue RV = PerformVMOVDRRCombine(N, DAG); 8112 if (RV.getNode()) 8113 return RV; 8114 } 8115 8116 // Load i64 elements as f64 values so that type legalization does not split 8117 // them up into i32 values. 8118 EVT VT = N->getValueType(0); 8119 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 8120 return SDValue(); 8121 DebugLoc dl = N->getDebugLoc(); 8122 SmallVector<SDValue, 8> Ops; 8123 unsigned NumElts = VT.getVectorNumElements(); 8124 for (unsigned i = 0; i < NumElts; ++i) { 8125 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 8126 Ops.push_back(V); 8127 // Make the DAGCombiner fold the bitcast. 8128 DCI.AddToWorklist(V.getNode()); 8129 } 8130 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 8131 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 8132 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 8133} 8134 8135/// PerformInsertEltCombine - Target-specific dag combine xforms for 8136/// ISD::INSERT_VECTOR_ELT. 8137static SDValue PerformInsertEltCombine(SDNode *N, 8138 TargetLowering::DAGCombinerInfo &DCI) { 8139 // Bitcast an i64 load inserted into a vector to f64. 8140 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8141 EVT VT = N->getValueType(0); 8142 SDNode *Elt = N->getOperand(1).getNode(); 8143 if (VT.getVectorElementType() != MVT::i64 || 8144 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 8145 return SDValue(); 8146 8147 SelectionDAG &DAG = DCI.DAG; 8148 DebugLoc dl = N->getDebugLoc(); 8149 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8150 VT.getVectorNumElements()); 8151 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 8152 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 8153 // Make the DAGCombiner fold the bitcasts. 8154 DCI.AddToWorklist(Vec.getNode()); 8155 DCI.AddToWorklist(V.getNode()); 8156 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 8157 Vec, V, N->getOperand(2)); 8158 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 8159} 8160 8161/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 8162/// ISD::VECTOR_SHUFFLE. 8163static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 8164 // The LLVM shufflevector instruction does not require the shuffle mask 8165 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 8166 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 8167 // operands do not match the mask length, they are extended by concatenating 8168 // them with undef vectors. That is probably the right thing for other 8169 // targets, but for NEON it is better to concatenate two double-register 8170 // size vector operands into a single quad-register size vector. 
Do that 8171 // transformation here: 8172 // shuffle(concat(v1, undef), concat(v2, undef)) -> 8173 // shuffle(concat(v1, v2), undef) 8174 SDValue Op0 = N->getOperand(0); 8175 SDValue Op1 = N->getOperand(1); 8176 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 8177 Op1.getOpcode() != ISD::CONCAT_VECTORS || 8178 Op0.getNumOperands() != 2 || 8179 Op1.getNumOperands() != 2) 8180 return SDValue(); 8181 SDValue Concat0Op1 = Op0.getOperand(1); 8182 SDValue Concat1Op1 = Op1.getOperand(1); 8183 if (Concat0Op1.getOpcode() != ISD::UNDEF || 8184 Concat1Op1.getOpcode() != ISD::UNDEF) 8185 return SDValue(); 8186 // Skip the transformation if any of the types are illegal. 8187 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8188 EVT VT = N->getValueType(0); 8189 if (!TLI.isTypeLegal(VT) || 8190 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 8191 !TLI.isTypeLegal(Concat1Op1.getValueType())) 8192 return SDValue(); 8193 8194 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 8195 Op0.getOperand(0), Op1.getOperand(0)); 8196 // Translate the shuffle mask. 8197 SmallVector<int, 16> NewMask; 8198 unsigned NumElts = VT.getVectorNumElements(); 8199 unsigned HalfElts = NumElts/2; 8200 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 8201 for (unsigned n = 0; n < NumElts; ++n) { 8202 int MaskElt = SVN->getMaskElt(n); 8203 int NewElt = -1; 8204 if (MaskElt < (int)HalfElts) 8205 NewElt = MaskElt; 8206 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 8207 NewElt = HalfElts + MaskElt - NumElts; 8208 NewMask.push_back(NewElt); 8209 } 8210 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 8211 DAG.getUNDEF(VT), NewMask.data()); 8212} 8213 8214/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 8215/// NEON load/store intrinsics to merge base address updates. 8216static SDValue CombineBaseUpdate(SDNode *N, 8217 TargetLowering::DAGCombinerInfo &DCI) { 8218 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 8219 return SDValue(); 8220 8221 SelectionDAG &DAG = DCI.DAG; 8222 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 8223 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 8224 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 8225 SDValue Addr = N->getOperand(AddrOpIdx); 8226 8227 // Search for a use of the address operand that is an increment. 8228 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 8229 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 8230 SDNode *User = *UI; 8231 if (User->getOpcode() != ISD::ADD || 8232 UI.getUse().getResNo() != Addr.getResNo()) 8233 continue; 8234 8235 // Check that the add is independent of the load/store. Otherwise, folding 8236 // it would create a cycle. 8237 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 8238 continue; 8239 8240 // Find the new opcode for the updating load/store. 
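    // Illustrative example: arm_neon_vld2 maps to ARMISD::VLD2_UPD below, so
    //   %vec = vld2(%ptr);  %ptr.next = add %ptr, <access size>
    // becomes a single updating load that also produces the post-incremented
    // pointer, e.g. vld2.32 {d16, d17}, [r0]! (register numbers hypothetical).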
8241 bool isLoad = true; 8242 bool isLaneOp = false; 8243 unsigned NewOpc = 0; 8244 unsigned NumVecs = 0; 8245 if (isIntrinsic) { 8246 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 8247 switch (IntNo) { 8248 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 8249 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 8250 NumVecs = 1; break; 8251 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 8252 NumVecs = 2; break; 8253 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 8254 NumVecs = 3; break; 8255 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 8256 NumVecs = 4; break; 8257 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 8258 NumVecs = 2; isLaneOp = true; break; 8259 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 8260 NumVecs = 3; isLaneOp = true; break; 8261 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 8262 NumVecs = 4; isLaneOp = true; break; 8263 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 8264 NumVecs = 1; isLoad = false; break; 8265 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 8266 NumVecs = 2; isLoad = false; break; 8267 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 8268 NumVecs = 3; isLoad = false; break; 8269 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 8270 NumVecs = 4; isLoad = false; break; 8271 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 8272 NumVecs = 2; isLoad = false; isLaneOp = true; break; 8273 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 8274 NumVecs = 3; isLoad = false; isLaneOp = true; break; 8275 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 8276 NumVecs = 4; isLoad = false; isLaneOp = true; break; 8277 } 8278 } else { 8279 isLaneOp = true; 8280 switch (N->getOpcode()) { 8281 default: llvm_unreachable("unexpected opcode for Neon base update"); 8282 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 8283 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 8284 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 8285 } 8286 } 8287 8288 // Find the size of memory referenced by the load/store. 8289 EVT VecTy; 8290 if (isLoad) 8291 VecTy = N->getValueType(0); 8292 else 8293 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 8294 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 8295 if (isLaneOp) 8296 NumBytes /= VecTy.getVectorNumElements(); 8297 8298 // If the increment is a constant, it must match the memory ref size. 8299 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 8300 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 8301 uint64_t IncVal = CInc->getZExtValue(); 8302 if (IncVal != NumBytes) 8303 continue; 8304 } else if (NumBytes >= 3 * 16) { 8305 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 8306 // separate instructions that make it harder to use a non-constant update. 8307 continue; 8308 } 8309 8310 // Create the new updating load/store node. 8311 EVT Tys[6]; 8312 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 8313 unsigned n; 8314 for (n = 0; n < NumResultVecs; ++n) 8315 Tys[n] = VecTy; 8316 Tys[n++] = MVT::i32; 8317 Tys[n] = MVT::Other; 8318 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 8319 SmallVector<SDValue, 8> Ops; 8320 Ops.push_back(N->getOperand(0)); // incoming chain 8321 Ops.push_back(N->getOperand(AddrOpIdx)); 8322 Ops.push_back(Inc); 8323 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 8324 Ops.push_back(N->getOperand(i)); 8325 } 8326 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 8327 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 8328 Ops.data(), Ops.size(), 8329 MemInt->getMemoryVT(), 8330 MemInt->getMemOperand()); 8331 8332 // Update the uses. 8333 std::vector<SDValue> NewResults; 8334 for (unsigned i = 0; i < NumResultVecs; ++i) { 8335 NewResults.push_back(SDValue(UpdN.getNode(), i)); 8336 } 8337 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 8338 DCI.CombineTo(N, NewResults); 8339 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 8340 8341 break; 8342 } 8343 return SDValue(); 8344} 8345 8346/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 8347/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 8348/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 8349/// return true. 8350static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 8351 SelectionDAG &DAG = DCI.DAG; 8352 EVT VT = N->getValueType(0); 8353 // vldN-dup instructions only support 64-bit vectors for N > 1. 8354 if (!VT.is64BitVector()) 8355 return false; 8356 8357 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 8358 SDNode *VLD = N->getOperand(0).getNode(); 8359 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 8360 return false; 8361 unsigned NumVecs = 0; 8362 unsigned NewOpc = 0; 8363 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 8364 if (IntNo == Intrinsic::arm_neon_vld2lane) { 8365 NumVecs = 2; 8366 NewOpc = ARMISD::VLD2DUP; 8367 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 8368 NumVecs = 3; 8369 NewOpc = ARMISD::VLD3DUP; 8370 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 8371 NumVecs = 4; 8372 NewOpc = ARMISD::VLD4DUP; 8373 } else { 8374 return false; 8375 } 8376 8377 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 8378 // numbers match the load. 8379 unsigned VLDLaneNo = 8380 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 8381 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8382 UI != UE; ++UI) { 8383 // Ignore uses of the chain result. 8384 if (UI.getUse().getResNo() == NumVecs) 8385 continue; 8386 SDNode *User = *UI; 8387 if (User->getOpcode() != ARMISD::VDUPLANE || 8388 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 8389 return false; 8390 } 8391 8392 // Create the vldN-dup node. 8393 EVT Tys[5]; 8394 unsigned n; 8395 for (n = 0; n < NumVecs; ++n) 8396 Tys[n] = VT; 8397 Tys[n] = MVT::Other; 8398 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 8399 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 8400 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 8401 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 8402 Ops, 2, VLDMemInt->getMemoryVT(), 8403 VLDMemInt->getMemOperand()); 8404 8405 // Update the uses. 
8406 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8407 UI != UE; ++UI) { 8408 unsigned ResNo = UI.getUse().getResNo(); 8409 // Ignore uses of the chain result. 8410 if (ResNo == NumVecs) 8411 continue; 8412 SDNode *User = *UI; 8413 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 8414 } 8415 8416 // Now the vldN-lane intrinsic is dead except for its chain result. 8417 // Update uses of the chain. 8418 std::vector<SDValue> VLDDupResults; 8419 for (unsigned n = 0; n < NumVecs; ++n) 8420 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 8421 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 8422 DCI.CombineTo(VLD, VLDDupResults); 8423 8424 return true; 8425} 8426 8427/// PerformVDUPLANECombine - Target-specific dag combine xforms for 8428/// ARMISD::VDUPLANE. 8429static SDValue PerformVDUPLANECombine(SDNode *N, 8430 TargetLowering::DAGCombinerInfo &DCI) { 8431 SDValue Op = N->getOperand(0); 8432 8433 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 8434 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 8435 if (CombineVLDDUP(N, DCI)) 8436 return SDValue(N, 0); 8437 8438 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 8439 // redundant. Ignore bit_converts for now; element sizes are checked below. 8440 while (Op.getOpcode() == ISD::BITCAST) 8441 Op = Op.getOperand(0); 8442 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 8443 return SDValue(); 8444 8445 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 8446 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 8447 // The canonical VMOV for a zero vector uses a 32-bit element size. 8448 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8449 unsigned EltBits; 8450 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 8451 EltSize = 8; 8452 EVT VT = N->getValueType(0); 8453 if (EltSize > VT.getVectorElementType().getSizeInBits()) 8454 return SDValue(); 8455 8456 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 8457} 8458 8459// isConstVecPow2 - Return true if each vector element is a power of 2, all 8460// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 8461static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 8462{ 8463 integerPart cN; 8464 integerPart c0 = 0; 8465 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 8466 I != E; I++) { 8467 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 8468 if (!C) 8469 return false; 8470 8471 bool isExact; 8472 APFloat APF = C->getValueAPF(); 8473 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 8474 != APFloat::opOK || !isExact) 8475 return false; 8476 8477 c0 = (I == 0) ? cN : c0; 8478 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 8479 return false; 8480 } 8481 C = c0; 8482 return true; 8483} 8484 8485/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 8486/// can replace combinations of VMUL and VCVT (floating-point to integer) 8487/// when the VMUL has a constant operand that is a power of 2. 
8488 /// 8489 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 8490 /// vmul.f32 d16, d17, d16 8491 /// vcvt.s32.f32 d16, d16 8492 /// becomes: 8493 /// vcvt.s32.f32 d16, d16, #3 8494static SDValue PerformVCVTCombine(SDNode *N, 8495 TargetLowering::DAGCombinerInfo &DCI, 8496 const ARMSubtarget *Subtarget) { 8497 SelectionDAG &DAG = DCI.DAG; 8498 SDValue Op = N->getOperand(0); 8499 8500 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 8501 Op.getOpcode() != ISD::FMUL) 8502 return SDValue(); 8503 8504 uint64_t C; 8505 SDValue N0 = Op->getOperand(0); 8506 SDValue ConstVec = Op->getOperand(1); 8507 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 8508 8509 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 8510 !isConstVecPow2(ConstVec, isSigned, C)) 8511 return SDValue(); 8512 8513 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 8514 Intrinsic::arm_neon_vcvtfp2fxu; 8515 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 8516 N->getValueType(0), 8517 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 8518 DAG.getConstant(Log2_64(C), MVT::i32)); 8519} 8520 8521/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 8522/// can replace combinations of VCVT (integer to floating-point) and VDIV 8523/// when the VDIV has a constant operand that is a power of 2. 8524/// 8525/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 8526/// vcvt.f32.s32 d16, d16 8527/// vdiv.f32 d16, d17, d16 8528/// becomes: 8529/// vcvt.f32.s32 d16, d16, #3 8530static SDValue PerformVDIVCombine(SDNode *N, 8531 TargetLowering::DAGCombinerInfo &DCI, 8532 const ARMSubtarget *Subtarget) { 8533 SelectionDAG &DAG = DCI.DAG; 8534 SDValue Op = N->getOperand(0); 8535 unsigned OpOpcode = Op.getNode()->getOpcode(); 8536 8537 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 8538 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 8539 return SDValue(); 8540 8541 uint64_t C; 8542 SDValue ConstVec = N->getOperand(1); 8543 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 8544 8545 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 8546 !isConstVecPow2(ConstVec, isSigned, C)) 8547 return SDValue(); 8548 8549 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 8550 Intrinsic::arm_neon_vcvtfxu2fp; 8551 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 8552 Op.getValueType(), 8553 DAG.getConstant(IntrinsicOpcode, MVT::i32), 8554 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 8555} 8556 8557/// getVShiftImm - Check if this is a valid build_vector for the immediate 8558/// operand of a vector shift operation, where all the elements of the 8559/// build_vector must have the same constant integer value. 8560static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 8561 // Ignore bit_converts. 8562 while (Op.getOpcode() == ISD::BITCAST) 8563 Op = Op.getOperand(0); 8564 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 8565 APInt SplatBits, SplatUndef; 8566 unsigned SplatBitSize; 8567 bool HasAnyUndefs; 8568 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 8569 HasAnyUndefs, ElementBits) || 8570 SplatBitSize > ElementBits) 8571 return false; 8572 Cnt = SplatBits.getSExtValue(); 8573 return true; 8574} 8575 8576/// isVShiftLImm - Check if this is a valid build_vector for the immediate 8577/// operand of a vector shift left operation.
That value must be in the range: 8578/// 0 <= Value < ElementBits for a left shift; or 8579/// 0 <= Value <= ElementBits for a long left shift. 8580static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 8581 assert(VT.isVector() && "vector shift count is not a vector type"); 8582 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 8583 if (! getVShiftImm(Op, ElementBits, Cnt)) 8584 return false; 8585 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 8586} 8587 8588/// isVShiftRImm - Check if this is a valid build_vector for the immediate 8589/// operand of a vector shift right operation. For a shift opcode, the value 8590/// is positive, but for an intrinsic the value count must be negative. The 8591/// absolute value must be in the range: 8592/// 1 <= |Value| <= ElementBits for a right shift; or 8593/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 8594static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 8595 int64_t &Cnt) { 8596 assert(VT.isVector() && "vector shift count is not a vector type"); 8597 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 8598 if (! getVShiftImm(Op, ElementBits, Cnt)) 8599 return false; 8600 if (isIntrinsic) 8601 Cnt = -Cnt; 8602 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 8603} 8604 8605/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 8606static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 8607 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 8608 switch (IntNo) { 8609 default: 8610 // Don't do anything for most intrinsics. 8611 break; 8612 8613 // Vector shifts: check for immediate versions and lower them. 8614 // Note: This is done during DAG combining instead of DAG legalizing because 8615 // the build_vectors for 64-bit vector element shift counts are generally 8616 // not legal, and it is hard to see their values after they get legalized to 8617 // loads from a constant pool. 8618 case Intrinsic::arm_neon_vshifts: 8619 case Intrinsic::arm_neon_vshiftu: 8620 case Intrinsic::arm_neon_vshiftls: 8621 case Intrinsic::arm_neon_vshiftlu: 8622 case Intrinsic::arm_neon_vshiftn: 8623 case Intrinsic::arm_neon_vrshifts: 8624 case Intrinsic::arm_neon_vrshiftu: 8625 case Intrinsic::arm_neon_vrshiftn: 8626 case Intrinsic::arm_neon_vqshifts: 8627 case Intrinsic::arm_neon_vqshiftu: 8628 case Intrinsic::arm_neon_vqshiftsu: 8629 case Intrinsic::arm_neon_vqshiftns: 8630 case Intrinsic::arm_neon_vqshiftnu: 8631 case Intrinsic::arm_neon_vqshiftnsu: 8632 case Intrinsic::arm_neon_vqrshiftns: 8633 case Intrinsic::arm_neon_vqrshiftnu: 8634 case Intrinsic::arm_neon_vqrshiftnsu: { 8635 EVT VT = N->getOperand(1).getValueType(); 8636 int64_t Cnt; 8637 unsigned VShiftOpc = 0; 8638 8639 switch (IntNo) { 8640 case Intrinsic::arm_neon_vshifts: 8641 case Intrinsic::arm_neon_vshiftu: 8642 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 8643 VShiftOpc = ARMISD::VSHL; 8644 break; 8645 } 8646 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 8647 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
8648 ARMISD::VSHRs : ARMISD::VSHRu); 8649 break; 8650 } 8651 return SDValue(); 8652 8653 case Intrinsic::arm_neon_vshiftls: 8654 case Intrinsic::arm_neon_vshiftlu: 8655 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 8656 break; 8657 llvm_unreachable("invalid shift count for vshll intrinsic"); 8658 8659 case Intrinsic::arm_neon_vrshifts: 8660 case Intrinsic::arm_neon_vrshiftu: 8661 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 8662 break; 8663 return SDValue(); 8664 8665 case Intrinsic::arm_neon_vqshifts: 8666 case Intrinsic::arm_neon_vqshiftu: 8667 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 8668 break; 8669 return SDValue(); 8670 8671 case Intrinsic::arm_neon_vqshiftsu: 8672 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 8673 break; 8674 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 8675 8676 case Intrinsic::arm_neon_vshiftn: 8677 case Intrinsic::arm_neon_vrshiftn: 8678 case Intrinsic::arm_neon_vqshiftns: 8679 case Intrinsic::arm_neon_vqshiftnu: 8680 case Intrinsic::arm_neon_vqshiftnsu: 8681 case Intrinsic::arm_neon_vqrshiftns: 8682 case Intrinsic::arm_neon_vqrshiftnu: 8683 case Intrinsic::arm_neon_vqrshiftnsu: 8684 // Narrowing shifts require an immediate right shift. 8685 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 8686 break; 8687 llvm_unreachable("invalid shift count for narrowing vector shift " 8688 "intrinsic"); 8689 8690 default: 8691 llvm_unreachable("unhandled vector shift"); 8692 } 8693 8694 switch (IntNo) { 8695 case Intrinsic::arm_neon_vshifts: 8696 case Intrinsic::arm_neon_vshiftu: 8697 // Opcode already set above. 8698 break; 8699 case Intrinsic::arm_neon_vshiftls: 8700 case Intrinsic::arm_neon_vshiftlu: 8701 if (Cnt == VT.getVectorElementType().getSizeInBits()) 8702 VShiftOpc = ARMISD::VSHLLi; 8703 else 8704 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
8705 ARMISD::VSHLLs : ARMISD::VSHLLu); 8706 break; 8707 case Intrinsic::arm_neon_vshiftn: 8708 VShiftOpc = ARMISD::VSHRN; break; 8709 case Intrinsic::arm_neon_vrshifts: 8710 VShiftOpc = ARMISD::VRSHRs; break; 8711 case Intrinsic::arm_neon_vrshiftu: 8712 VShiftOpc = ARMISD::VRSHRu; break; 8713 case Intrinsic::arm_neon_vrshiftn: 8714 VShiftOpc = ARMISD::VRSHRN; break; 8715 case Intrinsic::arm_neon_vqshifts: 8716 VShiftOpc = ARMISD::VQSHLs; break; 8717 case Intrinsic::arm_neon_vqshiftu: 8718 VShiftOpc = ARMISD::VQSHLu; break; 8719 case Intrinsic::arm_neon_vqshiftsu: 8720 VShiftOpc = ARMISD::VQSHLsu; break; 8721 case Intrinsic::arm_neon_vqshiftns: 8722 VShiftOpc = ARMISD::VQSHRNs; break; 8723 case Intrinsic::arm_neon_vqshiftnu: 8724 VShiftOpc = ARMISD::VQSHRNu; break; 8725 case Intrinsic::arm_neon_vqshiftnsu: 8726 VShiftOpc = ARMISD::VQSHRNsu; break; 8727 case Intrinsic::arm_neon_vqrshiftns: 8728 VShiftOpc = ARMISD::VQRSHRNs; break; 8729 case Intrinsic::arm_neon_vqrshiftnu: 8730 VShiftOpc = ARMISD::VQRSHRNu; break; 8731 case Intrinsic::arm_neon_vqrshiftnsu: 8732 VShiftOpc = ARMISD::VQRSHRNsu; break; 8733 } 8734 8735 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 8736 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 8737 } 8738 8739 case Intrinsic::arm_neon_vshiftins: { 8740 EVT VT = N->getOperand(1).getValueType(); 8741 int64_t Cnt; 8742 unsigned VShiftOpc = 0; 8743 8744 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 8745 VShiftOpc = ARMISD::VSLI; 8746 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 8747 VShiftOpc = ARMISD::VSRI; 8748 else { 8749 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 8750 } 8751 8752 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 8753 N->getOperand(1), N->getOperand(2), 8754 DAG.getConstant(Cnt, MVT::i32)); 8755 } 8756 8757 case Intrinsic::arm_neon_vqrshifts: 8758 case Intrinsic::arm_neon_vqrshiftu: 8759 // No immediate versions of these to check for. 8760 break; 8761 } 8762 8763 return SDValue(); 8764} 8765 8766/// PerformShiftCombine - Checks for immediate versions of vector shifts and 8767/// lowers them. As with the vector shift intrinsics, this is done during DAG 8768/// combining instead of DAG legalizing because the build_vectors for 64-bit 8769/// vector element shift counts are generally not legal, and it is hard to see 8770/// their values after they get legalized to loads from a constant pool. 8771static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 8772 const ARMSubtarget *ST) { 8773 EVT VT = N->getValueType(0); 8774 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 8775 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 8776 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 8777 SDValue N1 = N->getOperand(1); 8778 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 8779 SDValue N0 = N->getOperand(0); 8780 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 8781 DAG.MaskedValueIsZero(N0.getOperand(0), 8782 APInt::getHighBitsSet(32, 16))) 8783 return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1); 8784 } 8785 } 8786 8787 // Nothing to be done for scalar shifts. 
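  // Illustrative example of the vector case handled below: (shl v4i32:x, splat 3)
  // becomes (ARMISD::VSHL x, 3), which selects to a vshl with an immediate shift
  // count rather than a register shift.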
8788 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8789 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 8790 return SDValue(); 8791 8792 assert(ST->hasNEON() && "unexpected vector shift"); 8793 int64_t Cnt; 8794 8795 switch (N->getOpcode()) { 8796 default: llvm_unreachable("unexpected shift opcode"); 8797 8798 case ISD::SHL: 8799 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 8800 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 8801 DAG.getConstant(Cnt, MVT::i32)); 8802 break; 8803 8804 case ISD::SRA: 8805 case ISD::SRL: 8806 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 8807 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 8808 ARMISD::VSHRs : ARMISD::VSHRu); 8809 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 8810 DAG.getConstant(Cnt, MVT::i32)); 8811 } 8812 } 8813 return SDValue(); 8814} 8815 8816/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 8817/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 8818static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 8819 const ARMSubtarget *ST) { 8820 SDValue N0 = N->getOperand(0); 8821 8822 // Check for sign- and zero-extensions of vector extract operations of 8- 8823 // and 16-bit vector elements. NEON supports these directly. They are 8824 // handled during DAG combining because type legalization will promote them 8825 // to 32-bit types and it is messy to recognize the operations after that. 8826 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 8827 SDValue Vec = N0.getOperand(0); 8828 SDValue Lane = N0.getOperand(1); 8829 EVT VT = N->getValueType(0); 8830 EVT EltVT = N0.getValueType(); 8831 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8832 8833 if (VT == MVT::i32 && 8834 (EltVT == MVT::i8 || EltVT == MVT::i16) && 8835 TLI.isTypeLegal(Vec.getValueType()) && 8836 isa<ConstantSDNode>(Lane)) { 8837 8838 unsigned Opc = 0; 8839 switch (N->getOpcode()) { 8840 default: llvm_unreachable("unexpected opcode"); 8841 case ISD::SIGN_EXTEND: 8842 Opc = ARMISD::VGETLANEs; 8843 break; 8844 case ISD::ZERO_EXTEND: 8845 case ISD::ANY_EXTEND: 8846 Opc = ARMISD::VGETLANEu; 8847 break; 8848 } 8849 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 8850 } 8851 } 8852 8853 return SDValue(); 8854} 8855 8856/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 8857/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 8858static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 8859 const ARMSubtarget *ST) { 8860 // If the target supports NEON, try to use vmax/vmin instructions for f32 8861 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 8862 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 8863 // a NaN; only do the transformation when it matches that behavior. 8864 8865 // For now only do this when using NEON for FP operations; if using VFP, it 8866 // is not obvious that the benefit outweighs the cost of switching to the 8867 // NEON pipeline. 
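  // Illustrative example: "x < y ? x : y" on f32, i.e.
  //   (select_cc x, y, x, y, setolt),
  // becomes ARMISD::FMIN when x is known not to be NaN, which then selects to
  // the NEON vmin.f32 instruction.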
8868 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 8869 N->getValueType(0) != MVT::f32) 8870 return SDValue(); 8871 8872 SDValue CondLHS = N->getOperand(0); 8873 SDValue CondRHS = N->getOperand(1); 8874 SDValue LHS = N->getOperand(2); 8875 SDValue RHS = N->getOperand(3); 8876 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 8877 8878 unsigned Opcode = 0; 8879 bool IsReversed; 8880 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 8881 IsReversed = false; // x CC y ? x : y 8882 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 8883 IsReversed = true ; // x CC y ? y : x 8884 } else { 8885 return SDValue(); 8886 } 8887 8888 bool IsUnordered; 8889 switch (CC) { 8890 default: break; 8891 case ISD::SETOLT: 8892 case ISD::SETOLE: 8893 case ISD::SETLT: 8894 case ISD::SETLE: 8895 case ISD::SETULT: 8896 case ISD::SETULE: 8897 // If LHS is NaN, an ordered comparison will be false and the result will 8898 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 8899 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 8900 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 8901 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 8902 break; 8903 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 8904 // will return -0, so vmin can only be used for unsafe math or if one of 8905 // the operands is known to be nonzero. 8906 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 8907 !DAG.getTarget().Options.UnsafeFPMath && 8908 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 8909 break; 8910 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 8911 break; 8912 8913 case ISD::SETOGT: 8914 case ISD::SETOGE: 8915 case ISD::SETGT: 8916 case ISD::SETGE: 8917 case ISD::SETUGT: 8918 case ISD::SETUGE: 8919 // If LHS is NaN, an ordered comparison will be false and the result will 8920 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 8921 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 8922 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 8923 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 8924 break; 8925 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 8926 // will return +0, so vmax can only be used for unsafe math or if one of 8927 // the operands is known to be nonzero. 8928 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 8929 !DAG.getTarget().Options.UnsafeFPMath && 8930 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 8931 break; 8932 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 8933 break; 8934 } 8935 8936 if (!Opcode) 8937 return SDValue(); 8938 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 8939} 8940 8941/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 8942SDValue 8943ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 8944 SDValue Cmp = N->getOperand(4); 8945 if (Cmp.getOpcode() != ARMISD::CMPZ) 8946 // Only looking at EQ and NE cases. 
8947 return SDValue(); 8948 8949 EVT VT = N->getValueType(0); 8950 DebugLoc dl = N->getDebugLoc(); 8951 SDValue LHS = Cmp.getOperand(0); 8952 SDValue RHS = Cmp.getOperand(1); 8953 SDValue FalseVal = N->getOperand(0); 8954 SDValue TrueVal = N->getOperand(1); 8955 SDValue ARMcc = N->getOperand(2); 8956 ARMCC::CondCodes CC = 8957 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 8958 8959 // Simplify 8960 // mov r1, r0 8961 // cmp r1, x 8962 // mov r0, y 8963 // moveq r0, x 8964 // to 8965 // cmp r0, x 8966 // movne r0, y 8967 // 8968 // mov r1, r0 8969 // cmp r1, x 8970 // mov r0, x 8971 // movne r0, y 8972 // to 8973 // cmp r0, x 8974 // movne r0, y 8975 /// FIXME: Turn this into a target neutral optimization? 8976 SDValue Res; 8977 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 8978 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 8979 N->getOperand(3), Cmp); 8980 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 8981 SDValue ARMcc; 8982 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 8983 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 8984 N->getOperand(3), NewCmp); 8985 } 8986 8987 if (Res.getNode()) { 8988 APInt KnownZero, KnownOne; 8989 DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne); 8990 // Capture demanded bits information that would be otherwise lost. 8991 if (KnownZero == 0xfffffffe) 8992 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8993 DAG.getValueType(MVT::i1)); 8994 else if (KnownZero == 0xffffff00) 8995 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8996 DAG.getValueType(MVT::i8)); 8997 else if (KnownZero == 0xffff0000) 8998 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8999 DAG.getValueType(MVT::i16)); 9000 } 9001 9002 return Res; 9003} 9004 9005SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 9006 DAGCombinerInfo &DCI) const { 9007 switch (N->getOpcode()) { 9008 default: break; 9009 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 9010 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 9011 case ISD::SUB: return PerformSUBCombine(N, DCI); 9012 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 9013 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 9014 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 9015 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 9016 case ARMISD::BFI: return PerformBFICombine(N, DCI); 9017 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 9018 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 9019 case ISD::STORE: return PerformSTORECombine(N, DCI); 9020 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 9021 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 9022 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 9023 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 9024 case ISD::FP_TO_SINT: 9025 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 9026 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 9027 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 9028 case ISD::SHL: 9029 case ISD::SRA: 9030 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 9031 case ISD::SIGN_EXTEND: 9032 case ISD::ZERO_EXTEND: 9033 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 9034 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 9035 case ARMISD::CMOV: return PerformCMOVCombine(N, 
DCI.DAG); 9036 case ARMISD::VLD2DUP: 9037 case ARMISD::VLD3DUP: 9038 case ARMISD::VLD4DUP: 9039 return CombineBaseUpdate(N, DCI); 9040 case ISD::INTRINSIC_VOID: 9041 case ISD::INTRINSIC_W_CHAIN: 9042 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 9043 case Intrinsic::arm_neon_vld1: 9044 case Intrinsic::arm_neon_vld2: 9045 case Intrinsic::arm_neon_vld3: 9046 case Intrinsic::arm_neon_vld4: 9047 case Intrinsic::arm_neon_vld2lane: 9048 case Intrinsic::arm_neon_vld3lane: 9049 case Intrinsic::arm_neon_vld4lane: 9050 case Intrinsic::arm_neon_vst1: 9051 case Intrinsic::arm_neon_vst2: 9052 case Intrinsic::arm_neon_vst3: 9053 case Intrinsic::arm_neon_vst4: 9054 case Intrinsic::arm_neon_vst2lane: 9055 case Intrinsic::arm_neon_vst3lane: 9056 case Intrinsic::arm_neon_vst4lane: 9057 return CombineBaseUpdate(N, DCI); 9058 default: break; 9059 } 9060 break; 9061 } 9062 return SDValue(); 9063} 9064 9065bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 9066 EVT VT) const { 9067 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 9068} 9069 9070bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 9071 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs. 9072 bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); 9073 9074 switch (VT.getSimpleVT().SimpleTy) { 9075 default: 9076 return false; 9077 case MVT::i8: 9078 case MVT::i16: 9079 case MVT::i32: 9080 // Unaligned access can use (for example) LDRB, LDRH, LDR. 9081 return AllowsUnaligned; 9082 case MVT::f64: 9083 case MVT::v2f64: 9084 // For any little-endian targets with NEON, we can support unaligned ld/st 9085 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. 9086 // A big-endian target may also explicitly support unaligned accesses. 9087 return Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian()); 9088 } 9089} 9090 9091static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, 9092 unsigned AlignCheck) { 9093 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && 9094 (DstAlign == 0 || DstAlign % AlignCheck == 0)); 9095} 9096 9097EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, 9098 unsigned DstAlign, unsigned SrcAlign, 9099 bool IsZeroVal, 9100 bool MemcpyStrSrc, 9101 MachineFunction &MF) const { 9102 const Function *F = MF.getFunction(); 9103 9104 // See if we can use NEON instructions for this... 9105 if (IsZeroVal && 9106 !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat) && 9107 Subtarget->hasNEON()) { 9108 if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) { 9109 return MVT::v4i32; 9110 } else if (memOpAlign(SrcAlign, DstAlign, 8) && Size >= 8) { 9111 return MVT::v2i32; 9112 } 9113 } 9114 9115 // Lowering to i32/i16 if the size permits. 9116 if (Size >= 4) { 9117 return MVT::i32; 9118 } else if (Size >= 2) { 9119 return MVT::i16; 9120 } 9121 9122 // Let the target-independent logic figure it out.
9123 return MVT::Other; 9124} 9125 9126static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 9127 if (V < 0) 9128 return false; 9129 9130 unsigned Scale = 1; 9131 switch (VT.getSimpleVT().SimpleTy) { 9132 default: return false; 9133 case MVT::i1: 9134 case MVT::i8: 9135 // Scale == 1; 9136 break; 9137 case MVT::i16: 9138 // Scale == 2; 9139 Scale = 2; 9140 break; 9141 case MVT::i32: 9142 // Scale == 4; 9143 Scale = 4; 9144 break; 9145 } 9146 9147 if ((V & (Scale - 1)) != 0) 9148 return false; 9149 V /= Scale; 9150 return V == (V & ((1LL << 5) - 1)); 9151} 9152 9153static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 9154 const ARMSubtarget *Subtarget) { 9155 bool isNeg = false; 9156 if (V < 0) { 9157 isNeg = true; 9158 V = - V; 9159 } 9160 9161 switch (VT.getSimpleVT().SimpleTy) { 9162 default: return false; 9163 case MVT::i1: 9164 case MVT::i8: 9165 case MVT::i16: 9166 case MVT::i32: 9167 // + imm12 or - imm8 9168 if (isNeg) 9169 return V == (V & ((1LL << 8) - 1)); 9170 return V == (V & ((1LL << 12) - 1)); 9171 case MVT::f32: 9172 case MVT::f64: 9173 // Same as ARM mode. FIXME: NEON? 9174 if (!Subtarget->hasVFP2()) 9175 return false; 9176 if ((V & 3) != 0) 9177 return false; 9178 V >>= 2; 9179 return V == (V & ((1LL << 8) - 1)); 9180 } 9181} 9182 9183/// isLegalAddressImmediate - Return true if the integer value can be used 9184/// as the offset of the target addressing mode for load / store of the 9185/// given type. 9186static bool isLegalAddressImmediate(int64_t V, EVT VT, 9187 const ARMSubtarget *Subtarget) { 9188 if (V == 0) 9189 return true; 9190 9191 if (!VT.isSimple()) 9192 return false; 9193 9194 if (Subtarget->isThumb1Only()) 9195 return isLegalT1AddressImmediate(V, VT); 9196 else if (Subtarget->isThumb2()) 9197 return isLegalT2AddressImmediate(V, VT, Subtarget); 9198 9199 // ARM mode. 9200 if (V < 0) 9201 V = - V; 9202 switch (VT.getSimpleVT().SimpleTy) { 9203 default: return false; 9204 case MVT::i1: 9205 case MVT::i8: 9206 case MVT::i32: 9207 // +- imm12 9208 return V == (V & ((1LL << 12) - 1)); 9209 case MVT::i16: 9210 // +- imm8 9211 return V == (V & ((1LL << 8) - 1)); 9212 case MVT::f32: 9213 case MVT::f64: 9214 if (!Subtarget->hasVFP2()) // FIXME: NEON? 9215 return false; 9216 if ((V & 3) != 0) 9217 return false; 9218 V >>= 2; 9219 return V == (V & ((1LL << 8) - 1)); 9220 } 9221} 9222 9223bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 9224 EVT VT) const { 9225 int Scale = AM.Scale; 9226 if (Scale < 0) 9227 return false; 9228 9229 switch (VT.getSimpleVT().SimpleTy) { 9230 default: return false; 9231 case MVT::i1: 9232 case MVT::i8: 9233 case MVT::i16: 9234 case MVT::i32: 9235 if (Scale == 1) 9236 return true; 9237 // r + r << imm 9238 Scale = Scale & ~1; 9239 return Scale == 2 || Scale == 4 || Scale == 8; 9240 case MVT::i64: 9241 // r + r 9242 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9243 return true; 9244 return false; 9245 case MVT::isVoid: 9246 // Note, we allow "void" uses (basically, uses that aren't loads or 9247 // stores), because arm allows folding a scale into many arithmetic 9248 // operations. This should be made more precise and revisited later. 9249 9250 // Allow r << imm, but the imm has to be a multiple of two. 9251 if (Scale & 1) return false; 9252 return isPowerOf2_32(Scale); 9253 } 9254} 9255 9256/// isLegalAddressingMode - Return true if the addressing mode represented 9257/// by AM is legal for this target, for a load/store of the specified type. 
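/// For example, in ARM mode an i32 load may use an immediate offset such as
/// "[r0, #1020]" or a scaled register form such as "[r0, r1, lsl #2]", but a
/// mode that combines a base register, a scaled index and a nonzero immediate
/// offset is rejected below.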
9258bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 9259 Type *Ty) const { 9260 EVT VT = getValueType(Ty, true); 9261 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 9262 return false; 9263 9264 // Can never fold addr of global into load/store. 9265 if (AM.BaseGV) 9266 return false; 9267 9268 switch (AM.Scale) { 9269 case 0: // no scale reg, must be "r+i" or "r", or "i". 9270 break; 9271 case 1: 9272 if (Subtarget->isThumb1Only()) 9273 return false; 9274 // FALL THROUGH. 9275 default: 9276 // ARM doesn't support any R+R*scale+imm addr modes. 9277 if (AM.BaseOffs) 9278 return false; 9279 9280 if (!VT.isSimple()) 9281 return false; 9282 9283 if (Subtarget->isThumb2()) 9284 return isLegalT2ScaledAddressingMode(AM, VT); 9285 9286 int Scale = AM.Scale; 9287 switch (VT.getSimpleVT().SimpleTy) { 9288 default: return false; 9289 case MVT::i1: 9290 case MVT::i8: 9291 case MVT::i32: 9292 if (Scale < 0) Scale = -Scale; 9293 if (Scale == 1) 9294 return true; 9295 // r + r << imm 9296 return isPowerOf2_32(Scale & ~1); 9297 case MVT::i16: 9298 case MVT::i64: 9299 // r + r 9300 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9301 return true; 9302 return false; 9303 9304 case MVT::isVoid: 9305 // Note, we allow "void" uses (basically, uses that aren't loads or 9306 // stores), because arm allows folding a scale into many arithmetic 9307 // operations. This should be made more precise and revisited later. 9308 9309 // Allow r << imm, but the imm has to be a multiple of two. 9310 if (Scale & 1) return false; 9311 return isPowerOf2_32(Scale); 9312 } 9313 } 9314 return true; 9315} 9316 9317/// isLegalICmpImmediate - Return true if the specified immediate is legal 9318/// icmp immediate, that is the target has icmp instructions which can compare 9319/// a register against the immediate without having to materialize the 9320/// immediate into a register. 9321bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 9322 // Thumb2 and ARM modes can use cmn for negative immediates. 9323 if (!Subtarget->isThumb()) 9324 return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1; 9325 if (Subtarget->isThumb2()) 9326 return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1; 9327 // Thumb1 doesn't have cmn, and only 8-bit immediates. 9328 return Imm >= 0 && Imm <= 255; 9329} 9330 9331/// isLegalAddImmediate - Return true if the specified immediate is a legal add 9332/// *or sub* immediate, that is the target has add or sub instructions which can 9333/// add a register with the immediate without having to materialize the 9334/// immediate into a register. 9335bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 9336 // Same encoding for add/sub, just flip the sign. 9337 int64_t AbsImm = llvm::abs64(Imm); 9338 if (!Subtarget->isThumb()) 9339 return ARM_AM::getSOImmVal(AbsImm) != -1; 9340 if (Subtarget->isThumb2()) 9341 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 9342 // Thumb1 only has 8-bit unsigned immediate. 
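  // (For example, "adds r0, #255" is encodable here, while an immediate of
  //  256 would have to be materialized into a register first.)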
9343 return AbsImm >= 0 && AbsImm <= 255; 9344} 9345 9346static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 9347 bool isSEXTLoad, SDValue &Base, 9348 SDValue &Offset, bool &isInc, 9349 SelectionDAG &DAG) { 9350 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9351 return false; 9352 9353 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 9354 // AddressingMode 3 9355 Base = Ptr->getOperand(0); 9356 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9357 int RHSC = (int)RHS->getZExtValue(); 9358 if (RHSC < 0 && RHSC > -256) { 9359 assert(Ptr->getOpcode() == ISD::ADD); 9360 isInc = false; 9361 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9362 return true; 9363 } 9364 } 9365 isInc = (Ptr->getOpcode() == ISD::ADD); 9366 Offset = Ptr->getOperand(1); 9367 return true; 9368 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 9369 // AddressingMode 2 9370 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9371 int RHSC = (int)RHS->getZExtValue(); 9372 if (RHSC < 0 && RHSC > -0x1000) { 9373 assert(Ptr->getOpcode() == ISD::ADD); 9374 isInc = false; 9375 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9376 Base = Ptr->getOperand(0); 9377 return true; 9378 } 9379 } 9380 9381 if (Ptr->getOpcode() == ISD::ADD) { 9382 isInc = true; 9383 ARM_AM::ShiftOpc ShOpcVal= 9384 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 9385 if (ShOpcVal != ARM_AM::no_shift) { 9386 Base = Ptr->getOperand(1); 9387 Offset = Ptr->getOperand(0); 9388 } else { 9389 Base = Ptr->getOperand(0); 9390 Offset = Ptr->getOperand(1); 9391 } 9392 return true; 9393 } 9394 9395 isInc = (Ptr->getOpcode() == ISD::ADD); 9396 Base = Ptr->getOperand(0); 9397 Offset = Ptr->getOperand(1); 9398 return true; 9399 } 9400 9401 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 9402 return false; 9403} 9404 9405static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 9406 bool isSEXTLoad, SDValue &Base, 9407 SDValue &Offset, bool &isInc, 9408 SelectionDAG &DAG) { 9409 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9410 return false; 9411 9412 Base = Ptr->getOperand(0); 9413 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9414 int RHSC = (int)RHS->getZExtValue(); 9415 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 9416 assert(Ptr->getOpcode() == ISD::ADD); 9417 isInc = false; 9418 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9419 return true; 9420 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 9421 isInc = Ptr->getOpcode() == ISD::ADD; 9422 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 9423 return true; 9424 } 9425 } 9426 9427 return false; 9428} 9429 9430/// getPreIndexedAddressParts - returns true by value, base pointer and 9431/// offset pointer and addressing mode by reference if the node's address 9432/// can be legally represented as pre-indexed load / store address. 
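/// For example, an (add r0, #8) feeding a load of the same width can become a
/// single pre-indexed "ldr r1, [r0, #8]!", which also writes r0+8 back to r0.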
9433bool 9434ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 9435 SDValue &Offset, 9436 ISD::MemIndexedMode &AM, 9437 SelectionDAG &DAG) const { 9438 if (Subtarget->isThumb1Only()) 9439 return false; 9440 9441 EVT VT; 9442 SDValue Ptr; 9443 bool isSEXTLoad = false; 9444 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 9445 Ptr = LD->getBasePtr(); 9446 VT = LD->getMemoryVT(); 9447 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 9448 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 9449 Ptr = ST->getBasePtr(); 9450 VT = ST->getMemoryVT(); 9451 } else 9452 return false; 9453 9454 bool isInc; 9455 bool isLegal = false; 9456 if (Subtarget->isThumb2()) 9457 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 9458 Offset, isInc, DAG); 9459 else 9460 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 9461 Offset, isInc, DAG); 9462 if (!isLegal) 9463 return false; 9464 9465 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 9466 return true; 9467} 9468 9469/// getPostIndexedAddressParts - returns true by value, base pointer and 9470/// offset pointer and addressing mode by reference if this node can be 9471/// combined with a load / store to form a post-indexed load / store. 9472bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 9473 SDValue &Base, 9474 SDValue &Offset, 9475 ISD::MemIndexedMode &AM, 9476 SelectionDAG &DAG) const { 9477 if (Subtarget->isThumb1Only()) 9478 return false; 9479 9480 EVT VT; 9481 SDValue Ptr; 9482 bool isSEXTLoad = false; 9483 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 9484 VT = LD->getMemoryVT(); 9485 Ptr = LD->getBasePtr(); 9486 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 9487 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 9488 VT = ST->getMemoryVT(); 9489 Ptr = ST->getBasePtr(); 9490 } else 9491 return false; 9492 9493 bool isInc; 9494 bool isLegal = false; 9495 if (Subtarget->isThumb2()) 9496 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 9497 isInc, DAG); 9498 else 9499 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 9500 isInc, DAG); 9501 if (!isLegal) 9502 return false; 9503 9504 if (Ptr != Base) { 9505 // Swap base ptr and offset to catch more post-index load / store when 9506 // it's legal. In Thumb2 mode, offset must be an immediate. 9507 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 9508 !Subtarget->isThumb2()) 9509 std::swap(Base, Offset); 9510 9511 // Post-indexed load / store update the base pointer. 9512 if (Ptr != Base) 9513 return false; 9514 } 9515 9516 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 9517 return true; 9518} 9519 9520void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 9521 APInt &KnownZero, 9522 APInt &KnownOne, 9523 const SelectionDAG &DAG, 9524 unsigned Depth) const { 9525 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); 9526 switch (Op.getOpcode()) { 9527 default: break; 9528 case ARMISD::CMOV: { 9529 // Bits are known zero/one if known on the LHS and RHS. 
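    // (A CMOV produces one of its first two operands, so a bit can only be
    //  reported as known here if it has the same known value in both.)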
9530 DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 9531 if (KnownZero == 0 && KnownOne == 0) return; 9532 9533 APInt KnownZeroRHS, KnownOneRHS; 9534 DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1); 9535 KnownZero &= KnownZeroRHS; 9536 KnownOne &= KnownOneRHS; 9537 return; 9538 } 9539 } 9540} 9541 9542//===----------------------------------------------------------------------===// 9543// ARM Inline Assembly Support 9544//===----------------------------------------------------------------------===// 9545 9546bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 9547 // Looking for "rev" which is V6+. 9548 if (!Subtarget->hasV6Ops()) 9549 return false; 9550 9551 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 9552 std::string AsmStr = IA->getAsmString(); 9553 SmallVector<StringRef, 4> AsmPieces; 9554 SplitString(AsmStr, AsmPieces, ";\n"); 9555 9556 switch (AsmPieces.size()) { 9557 default: return false; 9558 case 1: 9559 AsmStr = AsmPieces[0]; 9560 AsmPieces.clear(); 9561 SplitString(AsmStr, AsmPieces, " \t,"); 9562 9563 // rev $0, $1 9564 if (AsmPieces.size() == 3 && 9565 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 9566 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 9567 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 9568 if (Ty && Ty->getBitWidth() == 32) 9569 return IntrinsicLowering::LowerToByteSwap(CI); 9570 } 9571 break; 9572 } 9573 9574 return false; 9575} 9576 9577/// getConstraintType - Given a constraint letter, return the type of 9578/// constraint it is for this target. 9579ARMTargetLowering::ConstraintType 9580ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 9581 if (Constraint.size() == 1) { 9582 switch (Constraint[0]) { 9583 default: break; 9584 case 'l': return C_RegisterClass; 9585 case 'w': return C_RegisterClass; 9586 case 'h': return C_RegisterClass; 9587 case 'x': return C_RegisterClass; 9588 case 't': return C_RegisterClass; 9589 case 'j': return C_Other; // Constant for movw. 9590 // An address with a single base register. Due to the way we 9591 // currently handle addresses it is the same as an 'r' memory constraint. 9592 case 'Q': return C_Memory; 9593 } 9594 } else if (Constraint.size() == 2) { 9595 switch (Constraint[0]) { 9596 default: break; 9597 // All 'U+' constraints are addresses. 9598 case 'U': return C_Memory; 9599 } 9600 } 9601 return TargetLowering::getConstraintType(Constraint); 9602} 9603 9604/// Examine constraint type and operand type and determine a weight value. 9605/// This object must already have been set up with the operand type 9606/// and the current alternative constraint selected. 9607TargetLowering::ConstraintWeight 9608ARMTargetLowering::getSingleConstraintMatchWeight( 9609 AsmOperandInfo &info, const char *constraint) const { 9610 ConstraintWeight weight = CW_Invalid; 9611 Value *CallOperandVal = info.CallOperandVal; 9612 // If we don't have a value, we can't do a match, 9613 // but allow it at the lowest weight. 9614 if (CallOperandVal == NULL) 9615 return CW_Default; 9616 Type *type = CallOperandVal->getType(); 9617 // Look at the constraint type. 
9618 switch (*constraint) { 9619 default: 9620 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 9621 break; 9622 case 'l': 9623 if (type->isIntegerTy()) { 9624 if (Subtarget->isThumb()) 9625 weight = CW_SpecificReg; 9626 else 9627 weight = CW_Register; 9628 } 9629 break; 9630 case 'w': 9631 if (type->isFloatingPointTy()) 9632 weight = CW_Register; 9633 break; 9634 } 9635 return weight; 9636} 9637 9638typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 9639RCPair 9640ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 9641 EVT VT) const { 9642 if (Constraint.size() == 1) { 9643 // GCC ARM Constraint Letters 9644 switch (Constraint[0]) { 9645 case 'l': // Low regs or general regs. 9646 if (Subtarget->isThumb()) 9647 return RCPair(0U, &ARM::tGPRRegClass); 9648 return RCPair(0U, &ARM::GPRRegClass); 9649 case 'h': // High regs or no regs. 9650 if (Subtarget->isThumb()) 9651 return RCPair(0U, &ARM::hGPRRegClass); 9652 break; 9653 case 'r': 9654 return RCPair(0U, &ARM::GPRRegClass); 9655 case 'w': 9656 if (VT == MVT::f32) 9657 return RCPair(0U, &ARM::SPRRegClass); 9658 if (VT.getSizeInBits() == 64) 9659 return RCPair(0U, &ARM::DPRRegClass); 9660 if (VT.getSizeInBits() == 128) 9661 return RCPair(0U, &ARM::QPRRegClass); 9662 break; 9663 case 'x': 9664 if (VT == MVT::f32) 9665 return RCPair(0U, &ARM::SPR_8RegClass); 9666 if (VT.getSizeInBits() == 64) 9667 return RCPair(0U, &ARM::DPR_8RegClass); 9668 if (VT.getSizeInBits() == 128) 9669 return RCPair(0U, &ARM::QPR_8RegClass); 9670 break; 9671 case 't': 9672 if (VT == MVT::f32) 9673 return RCPair(0U, &ARM::SPRRegClass); 9674 break; 9675 } 9676 } 9677 if (StringRef("{cc}").equals_lower(Constraint)) 9678 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 9679 9680 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 9681} 9682 9683/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 9684/// vector. If it is invalid, don't add anything to Ops. 9685void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 9686 std::string &Constraint, 9687 std::vector<SDValue>&Ops, 9688 SelectionDAG &DAG) const { 9689 SDValue Result(0, 0); 9690 9691 // Currently only support length 1 constraints. 9692 if (Constraint.length() != 1) return; 9693 9694 char ConstraintLetter = Constraint[0]; 9695 switch (ConstraintLetter) { 9696 default: break; 9697 case 'j': 9698 case 'I': case 'J': case 'K': case 'L': 9699 case 'M': case 'N': case 'O': 9700 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 9701 if (!C) 9702 return; 9703 9704 int64_t CVal64 = C->getSExtValue(); 9705 int CVal = (int) CVal64; 9706 // None of these constraints allow values larger than 32 bits. Check 9707 // that the value fits in an int. 9708 if (CVal != CVal64) 9709 return; 9710 9711 switch (ConstraintLetter) { 9712 case 'j': 9713 // Constant suitable for movw, must be between 0 and 9714 // 65535. 9715 if (Subtarget->hasV6T2Ops()) 9716 if (CVal >= 0 && CVal <= 65535) 9717 break; 9718 return; 9719 case 'I': 9720 if (Subtarget->isThumb1Only()) { 9721 // This must be a constant between 0 and 255, for ADD 9722 // immediates. 9723 if (CVal >= 0 && CVal <= 255) 9724 break; 9725 } else if (Subtarget->isThumb2()) { 9726 // A constant that can be used as an immediate value in a 9727 // data-processing instruction. 9728 if (ARM_AM::getT2SOImmVal(CVal) != -1) 9729 break; 9730 } else { 9731 // A constant that can be used as an immediate value in a 9732 // data-processing instruction. 
9733 if (ARM_AM::getSOImmVal(CVal) != -1) 9734 break; 9735 } 9736 return; 9737 9738 case 'J': 9739 if (Subtarget->isThumb()) { // FIXME thumb2 9740 // This must be a constant between -255 and -1, for negated ADD 9741 // immediates. This can be used in GCC with an "n" modifier that 9742 // prints the negated value, for use with SUB instructions. It is 9743 // not useful otherwise but is implemented for compatibility. 9744 if (CVal >= -255 && CVal <= -1) 9745 break; 9746 } else { 9747 // This must be a constant between -4095 and 4095. It is not clear 9748 // what this constraint is intended for. Implemented for 9749 // compatibility with GCC. 9750 if (CVal >= -4095 && CVal <= 4095) 9751 break; 9752 } 9753 return; 9754 9755 case 'K': 9756 if (Subtarget->isThumb1Only()) { 9757 // A 32-bit value where only one byte has a nonzero value. Exclude 9758 // zero to match GCC. This constraint is used by GCC internally for 9759 // constants that can be loaded with a move/shift combination. 9760 // It is not useful otherwise but is implemented for compatibility. 9761 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 9762 break; 9763 } else if (Subtarget->isThumb2()) { 9764 // A constant whose bitwise inverse can be used as an immediate 9765 // value in a data-processing instruction. This can be used in GCC 9766 // with a "B" modifier that prints the inverted value, for use with 9767 // BIC and MVN instructions. It is not useful otherwise but is 9768 // implemented for compatibility. 9769 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 9770 break; 9771 } else { 9772 // A constant whose bitwise inverse can be used as an immediate 9773 // value in a data-processing instruction. This can be used in GCC 9774 // with a "B" modifier that prints the inverted value, for use with 9775 // BIC and MVN instructions. It is not useful otherwise but is 9776 // implemented for compatibility. 9777 if (ARM_AM::getSOImmVal(~CVal) != -1) 9778 break; 9779 } 9780 return; 9781 9782 case 'L': 9783 if (Subtarget->isThumb1Only()) { 9784 // This must be a constant between -7 and 7, 9785 // for 3-operand ADD/SUB immediate instructions. 9786 if (CVal >= -7 && CVal < 7) 9787 break; 9788 } else if (Subtarget->isThumb2()) { 9789 // A constant whose negation can be used as an immediate value in a 9790 // data-processing instruction. This can be used in GCC with an "n" 9791 // modifier that prints the negated value, for use with SUB 9792 // instructions. It is not useful otherwise but is implemented for 9793 // compatibility. 9794 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 9795 break; 9796 } else { 9797 // A constant whose negation can be used as an immediate value in a 9798 // data-processing instruction. This can be used in GCC with an "n" 9799 // modifier that prints the negated value, for use with SUB 9800 // instructions. It is not useful otherwise but is implemented for 9801 // compatibility. 9802 if (ARM_AM::getSOImmVal(-CVal) != -1) 9803 break; 9804 } 9805 return; 9806 9807 case 'M': 9808 if (Subtarget->isThumb()) { // FIXME thumb2 9809 // This must be a multiple of 4 between 0 and 1020, for 9810 // ADD sp + immediate. 9811 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 9812 break; 9813 } else { 9814 // A power of two or a constant between 0 and 32. This is used in 9815 // GCC for the shift amount on shifted register operands, but it is 9816 // useful in general for any shift amounts. 
9817 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 9818 break; 9819 } 9820 return; 9821 9822 case 'N': 9823 if (Subtarget->isThumb()) { // FIXME thumb2 9824 // This must be a constant between 0 and 31, for shift amounts. 9825 if (CVal >= 0 && CVal <= 31) 9826 break; 9827 } 9828 return; 9829 9830 case 'O': 9831 if (Subtarget->isThumb()) { // FIXME thumb2 9832 // This must be a multiple of 4 between -508 and 508, for 9833 // ADD/SUB sp = sp + immediate. 9834 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 9835 break; 9836 } 9837 return; 9838 } 9839 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 9840 break; 9841 } 9842 9843 if (Result.getNode()) { 9844 Ops.push_back(Result); 9845 return; 9846 } 9847 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 9848} 9849 9850bool 9851ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 9852 // The ARM target isn't yet aware of offsets. 9853 return false; 9854} 9855 9856bool ARM::isBitFieldInvertedMask(unsigned v) { 9857 if (v == 0xffffffff) 9858 return 0; 9859 // there can be 1's on either or both "outsides", all the "inside" 9860 // bits must be 0's 9861 unsigned int lsb = 0, msb = 31; 9862 while (v & (1 << msb)) --msb; 9863 while (v & (1 << lsb)) ++lsb; 9864 for (unsigned int i = lsb; i <= msb; ++i) { 9865 if (v & (1 << i)) 9866 return 0; 9867 } 9868 return 1; 9869} 9870 9871/// isFPImmLegal - Returns true if the target can instruction select the 9872/// specified FP immediate natively. If false, the legalizer will 9873/// materialize the FP immediate as a load from a constant pool. 9874bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 9875 if (!Subtarget->hasVFP3()) 9876 return false; 9877 if (VT == MVT::f32) 9878 return ARM_AM::getFP32Imm(Imm) != -1; 9879 if (VT == MVT::f64) 9880 return ARM_AM::getFP64Imm(Imm) != -1; 9881 return false; 9882} 9883 9884/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 9885/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 9886/// specified in the intrinsic calls. 9887bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 9888 const CallInst &I, 9889 unsigned Intrinsic) const { 9890 switch (Intrinsic) { 9891 case Intrinsic::arm_neon_vld1: 9892 case Intrinsic::arm_neon_vld2: 9893 case Intrinsic::arm_neon_vld3: 9894 case Intrinsic::arm_neon_vld4: 9895 case Intrinsic::arm_neon_vld2lane: 9896 case Intrinsic::arm_neon_vld3lane: 9897 case Intrinsic::arm_neon_vld4lane: { 9898 Info.opc = ISD::INTRINSIC_W_CHAIN; 9899 // Conservatively set memVT to the entire set of vectors loaded. 9900 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 9901 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 9902 Info.ptrVal = I.getArgOperand(0); 9903 Info.offset = 0; 9904 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 9905 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 9906 Info.vol = false; // volatile loads with NEON intrinsics not supported 9907 Info.readMem = true; 9908 Info.writeMem = false; 9909 return true; 9910 } 9911 case Intrinsic::arm_neon_vst1: 9912 case Intrinsic::arm_neon_vst2: 9913 case Intrinsic::arm_neon_vst3: 9914 case Intrinsic::arm_neon_vst4: 9915 case Intrinsic::arm_neon_vst2lane: 9916 case Intrinsic::arm_neon_vst3lane: 9917 case Intrinsic::arm_neon_vst4lane: { 9918 Info.opc = ISD::INTRINSIC_VOID; 9919 // Conservatively set memVT to the entire set of vectors stored. 
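    // (For example, a vst4.32 of four v4i32 values covers 64 bytes and is
    //  modeled below as a v8i64 memory type.)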
9920 unsigned NumElts = 0; 9921 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 9922 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 9923 if (!ArgTy->isVectorTy()) 9924 break; 9925 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 9926 } 9927 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 9928 Info.ptrVal = I.getArgOperand(0); 9929 Info.offset = 0; 9930 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 9931 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 9932 Info.vol = false; // volatile stores with NEON intrinsics not supported 9933 Info.readMem = false; 9934 Info.writeMem = true; 9935 return true; 9936 } 9937 case Intrinsic::arm_strexd: { 9938 Info.opc = ISD::INTRINSIC_W_CHAIN; 9939 Info.memVT = MVT::i64; 9940 Info.ptrVal = I.getArgOperand(2); 9941 Info.offset = 0; 9942 Info.align = 8; 9943 Info.vol = true; 9944 Info.readMem = false; 9945 Info.writeMem = true; 9946 return true; 9947 } 9948 case Intrinsic::arm_ldrexd: { 9949 Info.opc = ISD::INTRINSIC_W_CHAIN; 9950 Info.memVT = MVT::i64; 9951 Info.ptrVal = I.getArgOperand(0); 9952 Info.offset = 0; 9953 Info.align = 8; 9954 Info.vol = true; 9955 Info.readMem = true; 9956 Info.writeMem = false; 9957 return true; 9958 } 9959 default: 9960 break; 9961 } 9962 9963 return false; 9964} 9965
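
// The routine ARM::isBitFieldInvertedMask above accepts exactly those 32-bit
// values whose zero bits form a single contiguous run, i.e. whose complement
// is a nonzero "shifted mask"; such masks describe the bit-field cleared by
// BFC or replaced by BFI. The following is a minimal standalone sketch, not
// part of this translation unit, that expresses the same property with a
// carry trick; the helper name is illustrative only.

#include <cassert>
#include <cstdint>

static bool looksLikeInvertedBitFieldMask(uint32_t V) {
  uint32_t Inv = ~V;                  // the bits the mask would clear/insert
  if (Inv == 0)                       // 0xffffffff has no zero bits at all
    return false;
  uint32_t LowBit = Inv & (0u - Inv); // isolate the lowest set bit of ~V
  // If the set bits of ~V are contiguous, adding the lowest one carries past
  // the whole run and nothing of ~V survives the AND below.
  return ((Inv + LowBit) & Inv) == 0;
}

int main() {
  assert(looksLikeInvertedBitFieldMask(0xffff00ffu));  // zeros only in bits 8-15
  assert(looksLikeInvertedBitFieldMask(0x0000ffffu));  // zeros only in the top half
  assert(!looksLikeInvertedBitFieldMask(0xff00ff00u)); // two separate zero runs
  assert(!looksLikeInvertedBitFieldMask(0xffffffffu)); // no zero bits
  return 0;
}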