ARMISelLowering.cpp revision 35b3df6e31f9aac70fb471d74e39f899dfbd689f
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMISelLowering.h"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
                   cl::desc("Generate tail calls (TEMPORARY OPTION)."),
                   cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
                   cl::desc("Generate calls via indirect call instructions"),
                   cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call"
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const uint16_t GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::QPRRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      // These comparison routines return a nonzero i32 when their predicate
      // holds, so the libcall result is tested against zero: SETNE for the
      // ordinary predicates and SETEQ for the "ordered" check, which reuses
      // the unordered-compare routine.
      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
    setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMSET, CallingConv::ARM_AAPCS);
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same applies to v4f32, but keep in mind that vadd, vsub, and vmul
    // are natively supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have an
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
                  MVT::v4i16, MVT::v2i16,
                  MVT::v2i32};
    for (unsigned i = 0; i < 6; ++i) {
      setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // These just redirect to CTTZ and CTLZ on ARM.
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
      !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget->isTargetDarwin()) {
    // Non-Darwin platforms may return values in these registers via the
    // personality function.
    setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
    setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
    setExceptionPointerRegister(ARM::R0);
    setExceptionSelectorRegister(ARM::R1);
  }

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
  maxStoresPerMemset = 16;
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  // Prefer likely predicted branches to selects on out-of-order cores.
  predictableSelectIsExpensive = Subtarget->isLikeA9();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
  case ARMISD::VSHRN: return "ARMISD::VSHRN";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX: return "ARMISD::FMAX";
  case ARMISD::FMIN: return "ARMISD::FMIN";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fallthrough
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  DebugLoc &dl = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
1355 // These operations are automatically eliminated by the prolog/epilog pass 1356 if (!IsSibCall) 1357 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1358 1359 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1360 1361 RegsToPassVector RegsToPass; 1362 SmallVector<SDValue, 8> MemOpChains; 1363 1364 // Walk the register/memloc assignments, inserting copies/loads. In the case 1365 // of tail call optimization, arguments are handled later. 1366 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1367 i != e; 1368 ++i, ++realArgIdx) { 1369 CCValAssign &VA = ArgLocs[i]; 1370 SDValue Arg = OutVals[realArgIdx]; 1371 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1372 bool isByVal = Flags.isByVal(); 1373 1374 // Promote the value if needed. 1375 switch (VA.getLocInfo()) { 1376 default: llvm_unreachable("Unknown loc info!"); 1377 case CCValAssign::Full: break; 1378 case CCValAssign::SExt: 1379 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1380 break; 1381 case CCValAssign::ZExt: 1382 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1383 break; 1384 case CCValAssign::AExt: 1385 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1386 break; 1387 case CCValAssign::BCvt: 1388 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1389 break; 1390 } 1391 1392 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1393 if (VA.needsCustom()) { 1394 if (VA.getLocVT() == MVT::v2f64) { 1395 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1396 DAG.getConstant(0, MVT::i32)); 1397 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1398 DAG.getConstant(1, MVT::i32)); 1399 1400 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1401 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1402 1403 VA = ArgLocs[++i]; // skip ahead to next loc 1404 if (VA.isRegLoc()) { 1405 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1406 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1407 } else { 1408 assert(VA.isMemLoc()); 1409 1410 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1411 dl, DAG, VA, Flags)); 1412 } 1413 } else { 1414 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1415 StackPtr, MemOpChains, Flags); 1416 } 1417 } else if (VA.isRegLoc()) { 1418 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1419 } else if (isByVal) { 1420 assert(VA.isMemLoc()); 1421 unsigned offset = 0; 1422 1423 // True if this byval aggregate will be split between registers 1424 // and memory. 
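      // For example (AAPCS): a 24-byte byval aggregate whose first byval
      // register is r2 has its first 8 bytes loaded into r2/r3 by the loop
      // below, while the remaining 16 bytes are copied into the outgoing
      // argument area with ARMISD::COPY_STRUCT_BYVAL.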
1425 if (CCInfo.isFirstByValRegValid()) { 1426 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1427 unsigned int i, j; 1428 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1429 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1430 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1431 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1432 MachinePointerInfo(), 1433 false, false, false, 0); 1434 MemOpChains.push_back(Load.getValue(1)); 1435 RegsToPass.push_back(std::make_pair(j, Load)); 1436 } 1437 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1438 CCInfo.clearFirstByValReg(); 1439 } 1440 1441 if (Flags.getByValSize() - 4*offset > 0) { 1442 unsigned LocMemOffset = VA.getLocMemOffset(); 1443 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1444 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1445 StkPtrOff); 1446 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1447 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1448 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1449 MVT::i32); 1450 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32); 1451 1452 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 1453 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; 1454 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, 1455 Ops, array_lengthof(Ops))); 1456 } 1457 } else if (!IsSibCall) { 1458 assert(VA.isMemLoc()); 1459 1460 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1461 dl, DAG, VA, Flags)); 1462 } 1463 } 1464 1465 if (!MemOpChains.empty()) 1466 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1467 &MemOpChains[0], MemOpChains.size()); 1468 1469 // Build a sequence of copy-to-reg nodes chained together with token chain 1470 // and flag operands which copy the outgoing args into the appropriate regs. 1471 SDValue InFlag; 1472 // Tail call byval lowering might overwrite argument registers so in case of 1473 // tail call optimization the copies to registers are lowered later. 1474 if (!isTailCall) 1475 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1476 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1477 RegsToPass[i].second, InFlag); 1478 InFlag = Chain.getValue(1); 1479 } 1480 1481 // For tail calls lower the arguments to the 'real' stack slot. 1482 if (isTailCall) { 1483 // Force all the incoming stack arguments to be loaded from the stack 1484 // before any new outgoing arguments are stored to the stack, because the 1485 // outgoing stack slots may alias the incoming argument stack slots, and 1486 // the alias isn't otherwise explicit. This is slightly more conservative 1487 // than necessary, because it means that each store effectively depends 1488 // on every argument instead of just those arguments it would clobber. 1489 1490 // Do not flag preceding copytoreg stuff together with the following stuff. 1491 InFlag = SDValue(); 1492 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1493 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1494 RegsToPass[i].second, InFlag); 1495 InFlag = Chain.getValue(1); 1496 } 1497 InFlag =SDValue(); 1498 } 1499 1500 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1501 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1502 // node so that legalize doesn't hack it. 
1503 bool isDirect = false; 1504 bool isARMFunc = false; 1505 bool isLocalARMFunc = false; 1506 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1507 1508 if (EnableARMLongCalls) { 1509 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1510 && "long-calls with non-static relocation model!"); 1511 // Handle a global address or an external symbol. If it's not one of 1512 // those, the target's already in a register, so we don't need to do 1513 // anything extra. 1514 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1515 const GlobalValue *GV = G->getGlobal(); 1516 // Create a constant pool entry for the callee address 1517 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1518 ARMConstantPoolValue *CPV = 1519 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1520 1521 // Get the address of the callee into a register 1522 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1523 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1524 Callee = DAG.getLoad(getPointerTy(), dl, 1525 DAG.getEntryNode(), CPAddr, 1526 MachinePointerInfo::getConstantPool(), 1527 false, false, false, 0); 1528 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1529 const char *Sym = S->getSymbol(); 1530 1531 // Create a constant pool entry for the callee address 1532 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1533 ARMConstantPoolValue *CPV = 1534 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1535 ARMPCLabelIndex, 0); 1536 // Get the address of the callee into a register 1537 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1538 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1539 Callee = DAG.getLoad(getPointerTy(), dl, 1540 DAG.getEntryNode(), CPAddr, 1541 MachinePointerInfo::getConstantPool(), 1542 false, false, false, 0); 1543 } 1544 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1545 const GlobalValue *GV = G->getGlobal(); 1546 isDirect = true; 1547 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1548 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1549 getTargetMachine().getRelocationModel() != Reloc::Static; 1550 isARMFunc = !Subtarget->isThumb() || isStub; 1551 // ARM call to a local ARM function is predicable. 1552 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1553 // tBX takes a register source operand. 
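    // Pre-v5T Thumb has no BLX, so an ARM callee can only be reached through
    // a register-indirect branch; materialize the callee's address from the
    // constant pool (with a PIC add) so the call below is made indirectly.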
1554 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1555 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1556 ARMConstantPoolValue *CPV = 1557 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1558 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1559 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1560 Callee = DAG.getLoad(getPointerTy(), dl, 1561 DAG.getEntryNode(), CPAddr, 1562 MachinePointerInfo::getConstantPool(), 1563 false, false, false, 0); 1564 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1565 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1566 getPointerTy(), Callee, PICLabel); 1567 } else { 1568 // On ELF targets for PIC code, direct calls should go through the PLT 1569 unsigned OpFlags = 0; 1570 if (Subtarget->isTargetELF() && 1571 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1572 OpFlags = ARMII::MO_PLT; 1573 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1574 } 1575 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1576 isDirect = true; 1577 bool isStub = Subtarget->isTargetDarwin() && 1578 getTargetMachine().getRelocationModel() != Reloc::Static; 1579 isARMFunc = !Subtarget->isThumb() || isStub; 1580 // tBX takes a register source operand. 1581 const char *Sym = S->getSymbol(); 1582 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1583 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1584 ARMConstantPoolValue *CPV = 1585 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1586 ARMPCLabelIndex, 4); 1587 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1588 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1589 Callee = DAG.getLoad(getPointerTy(), dl, 1590 DAG.getEntryNode(), CPAddr, 1591 MachinePointerInfo::getConstantPool(), 1592 false, false, false, 0); 1593 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1594 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1595 getPointerTy(), Callee, PICLabel); 1596 } else { 1597 unsigned OpFlags = 0; 1598 // On ELF targets for PIC code, direct calls should go through the PLT 1599 if (Subtarget->isTargetELF() && 1600 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1601 OpFlags = ARMII::MO_PLT; 1602 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1603 } 1604 } 1605 1606 // FIXME: handle tail calls differently. 1607 unsigned CallOpc; 1608 bool HasMinSizeAttr = MF.getFunction()->getFnAttributes(). 1609 hasAttribute(Attributes::MinSize); 1610 if (Subtarget->isThumb()) { 1611 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1612 CallOpc = ARMISD::CALL_NOLINK; 1613 else 1614 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1615 } else { 1616 if (!isDirect && !Subtarget->hasV5TOps()) 1617 CallOpc = ARMISD::CALL_NOLINK; 1618 else if (doesNotRet && isDirect && Subtarget->hasRAS() && 1619 // Emit regular call when code size is the priority 1620 !HasMinSizeAttr) 1621 // "mov lr, pc; b _foo" to avoid confusing the RSP 1622 CallOpc = ARMISD::CALL_NOLINK; 1623 else 1624 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 1625 } 1626 1627 std::vector<SDValue> Ops; 1628 Ops.push_back(Chain); 1629 Ops.push_back(Callee); 1630 1631 // Add argument registers to the end of the list so that they are known live 1632 // into the call. 
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall)
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
ARMTargetLowering::HandleByVal(
    CCState *State, unsigned &size, unsigned Align) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");
  if ((!State->isFirstByValRegValid()) &&
      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
    if (Subtarget->isAAPCS_ABI() && Align > 4) {
      unsigned AlignInRegs = Align / 4;
      unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
      for (unsigned i = 0; i < Waste; ++i)
        reg = State->AllocateReg(GPRArgRegs, 4);
    }
    if (reg != 0) {
      State->setFirstByValReg(reg);
      // At a call site, a byval parameter that is split between
      // registers and memory needs its size truncated here. In a
      // function prologue, such byval parameters are reassembled in
      // memory, and are not truncated.
      if (State->getCallOrPrologue() == Call) {
        unsigned excess = 4 * (ARM::R4 - reg);
        assert(size >= excess && "expected larger existing stack allocation");
        size -= excess;
      }
    }
  }
  // Confiscate any remaining parameter registers to preclude their
  // assignment to subsequent parameters.
  while (State->AllocateReg(GPRArgRegs, 4))
    ;
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
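/// For example, if a caller merely forwards one of its own fixed stack
/// arguments to the callee at the same offset and size, the outgoing store
/// for a sibling call can be omitted because the bytes are already in place.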
1706static 1707bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1708 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1709 const TargetInstrInfo *TII) { 1710 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1711 int FI = INT_MAX; 1712 if (Arg.getOpcode() == ISD::CopyFromReg) { 1713 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1714 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1715 return false; 1716 MachineInstr *Def = MRI->getVRegDef(VR); 1717 if (!Def) 1718 return false; 1719 if (!Flags.isByVal()) { 1720 if (!TII->isLoadFromStackSlot(Def, FI)) 1721 return false; 1722 } else { 1723 return false; 1724 } 1725 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1726 if (Flags.isByVal()) 1727 // ByVal argument is passed in as a pointer but it's now being 1728 // dereferenced. e.g. 1729 // define @foo(%struct.X* %A) { 1730 // tail call @bar(%struct.X* byval %A) 1731 // } 1732 return false; 1733 SDValue Ptr = Ld->getBasePtr(); 1734 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1735 if (!FINode) 1736 return false; 1737 FI = FINode->getIndex(); 1738 } else 1739 return false; 1740 1741 assert(FI != INT_MAX); 1742 if (!MFI->isFixedObjectIndex(FI)) 1743 return false; 1744 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1745} 1746 1747/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1748/// for tail call optimization. Targets which want to do tail call 1749/// optimization should implement this function. 1750bool 1751ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1752 CallingConv::ID CalleeCC, 1753 bool isVarArg, 1754 bool isCalleeStructRet, 1755 bool isCallerStructRet, 1756 const SmallVectorImpl<ISD::OutputArg> &Outs, 1757 const SmallVectorImpl<SDValue> &OutVals, 1758 const SmallVectorImpl<ISD::InputArg> &Ins, 1759 SelectionDAG& DAG) const { 1760 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1761 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1762 bool CCMatch = CallerCC == CalleeCC; 1763 1764 // Look for obvious safe cases to perform tail call optimization that do not 1765 // require ABI changes. This is what gcc calls sibcall. 1766 1767 // Do not sibcall optimize vararg calls unless the call site is not passing 1768 // any arguments. 1769 if (isVarArg && !Outs.empty()) 1770 return false; 1771 1772 // Also avoid sibcall optimization if either caller or callee uses struct 1773 // return semantics. 1774 if (isCalleeStructRet || isCallerStructRet) 1775 return false; 1776 1777 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1778 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1779 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1780 // support in the assembler and linker to be used. This would need to be 1781 // fixed to fully support tail calls in Thumb1. 1782 // 1783 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1784 // LR. This means if we need to reload LR, it takes an extra instructions, 1785 // which outweighs the value of the tail call; but here we don't know yet 1786 // whether LR is going to be used. Probably the right approach is to 1787 // generate the tail call here and turn it back into CALL/RET in 1788 // emitEpilogue if LR is used. 
1789 1790 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1791 // but we need to make sure there are enough registers; the only valid 1792 // registers are the 4 used for parameters. We don't currently do this 1793 // case. 1794 if (Subtarget->isThumb1Only()) 1795 return false; 1796 1797 // If the calling conventions do not match, then we'd better make sure the 1798 // results are returned in the same way as what the caller expects. 1799 if (!CCMatch) { 1800 SmallVector<CCValAssign, 16> RVLocs1; 1801 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1802 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1803 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1804 1805 SmallVector<CCValAssign, 16> RVLocs2; 1806 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1807 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1808 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1809 1810 if (RVLocs1.size() != RVLocs2.size()) 1811 return false; 1812 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1813 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1814 return false; 1815 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1816 return false; 1817 if (RVLocs1[i].isRegLoc()) { 1818 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1819 return false; 1820 } else { 1821 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1822 return false; 1823 } 1824 } 1825 } 1826 1827 // If Caller's vararg or byval argument has been split between registers and 1828 // stack, do not perform tail call, since part of the argument is in caller's 1829 // local frame. 1830 const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction(). 1831 getInfo<ARMFunctionInfo>(); 1832 if (AFI_Caller->getVarArgsRegSaveSize()) 1833 return false; 1834 1835 // If the callee takes no arguments then go on to check the results of the 1836 // call. 1837 if (!Outs.empty()) { 1838 // Check if stack adjustment is needed. For now, do not do this if any 1839 // argument is passed on the stack. 1840 SmallVector<CCValAssign, 16> ArgLocs; 1841 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1842 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1843 CCInfo.AnalyzeCallOperands(Outs, 1844 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1845 if (CCInfo.getNextStackOffset()) { 1846 MachineFunction &MF = DAG.getMachineFunction(); 1847 1848 // Check if the arguments are already laid out in the right way as 1849 // the caller's fixed stack objects. 1850 MachineFrameInfo *MFI = MF.getFrameInfo(); 1851 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1852 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 1853 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1854 i != e; 1855 ++i, ++realArgIdx) { 1856 CCValAssign &VA = ArgLocs[i]; 1857 EVT RegVT = VA.getLocVT(); 1858 SDValue Arg = OutVals[realArgIdx]; 1859 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1860 if (VA.getLocInfo() == CCValAssign::Indirect) 1861 return false; 1862 if (VA.needsCustom()) { 1863 // f64 and vector types are split into multiple registers or 1864 // register/stack-slot combinations. The types will not match 1865 // the registers; give up on memory f64 refs until we figure 1866 // out what to do about this. 
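          // For instance, an f64 argument occupies two consecutive GPR
          // locations and a v2f64 argument four; the checks below insist
          // that every one of those pieces was assigned a register.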
1867 if (!VA.isRegLoc()) 1868 return false; 1869 if (!ArgLocs[++i].isRegLoc()) 1870 return false; 1871 if (RegVT == MVT::v2f64) { 1872 if (!ArgLocs[++i].isRegLoc()) 1873 return false; 1874 if (!ArgLocs[++i].isRegLoc()) 1875 return false; 1876 } 1877 } else if (!VA.isRegLoc()) { 1878 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1879 MFI, MRI, TII)) 1880 return false; 1881 } 1882 } 1883 } 1884 } 1885 1886 return true; 1887} 1888 1889bool 1890ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1891 MachineFunction &MF, bool isVarArg, 1892 const SmallVectorImpl<ISD::OutputArg> &Outs, 1893 LLVMContext &Context) const { 1894 SmallVector<CCValAssign, 16> RVLocs; 1895 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context); 1896 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true, 1897 isVarArg)); 1898} 1899 1900SDValue 1901ARMTargetLowering::LowerReturn(SDValue Chain, 1902 CallingConv::ID CallConv, bool isVarArg, 1903 const SmallVectorImpl<ISD::OutputArg> &Outs, 1904 const SmallVectorImpl<SDValue> &OutVals, 1905 DebugLoc dl, SelectionDAG &DAG) const { 1906 1907 // CCValAssign - represent the assignment of the return value to a location. 1908 SmallVector<CCValAssign, 16> RVLocs; 1909 1910 // CCState - Info about the registers and stack slots. 1911 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1912 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1913 1914 // Analyze outgoing return values. 1915 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1916 isVarArg)); 1917 1918 // If this is the first return lowered for this function, add 1919 // the regs to the liveout set for the function. 1920 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1921 for (unsigned i = 0; i != RVLocs.size(); ++i) 1922 if (RVLocs[i].isRegLoc()) 1923 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1924 } 1925 1926 SDValue Flag; 1927 1928 // Copy the result values into the output registers. 1929 for (unsigned i = 0, realRVLocIdx = 0; 1930 i != RVLocs.size(); 1931 ++i, ++realRVLocIdx) { 1932 CCValAssign &VA = RVLocs[i]; 1933 assert(VA.isRegLoc() && "Can only return in registers!"); 1934 1935 SDValue Arg = OutVals[realRVLocIdx]; 1936 1937 switch (VA.getLocInfo()) { 1938 default: llvm_unreachable("Unknown loc info!"); 1939 case CCValAssign::Full: break; 1940 case CCValAssign::BCvt: 1941 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1942 break; 1943 } 1944 1945 if (VA.needsCustom()) { 1946 if (VA.getLocVT() == MVT::v2f64) { 1947 // Extract the first half and return it in two registers. 1948 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1949 DAG.getConstant(0, MVT::i32)); 1950 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1951 DAG.getVTList(MVT::i32, MVT::i32), Half); 1952 1953 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1954 Flag = Chain.getValue(1); 1955 VA = RVLocs[++i]; // skip ahead to next loc 1956 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1957 HalfGPRs.getValue(1), Flag); 1958 Flag = Chain.getValue(1); 1959 VA = RVLocs[++i]; // skip ahead to next loc 1960 1961 // Extract the 2nd half and fall through to handle it as an f64 value. 1962 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1963 DAG.getConstant(1, MVT::i32)); 1964 } 1965 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1966 // available. 
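      // The f64 is moved to a GPR pair with ARMISD::VMOVRRD (fmrrd/vmov), e.g.
      //   vmov r0, r1, d0
      // and each half is then copied into its assigned return register.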
1967 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1968 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1969 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1970 Flag = Chain.getValue(1); 1971 VA = RVLocs[++i]; // skip ahead to next loc 1972 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1973 Flag); 1974 } else 1975 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1976 1977 // Guarantee that all emitted copies are 1978 // stuck together, avoiding something bad. 1979 Flag = Chain.getValue(1); 1980 } 1981 1982 SDValue result; 1983 if (Flag.getNode()) 1984 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1985 else // Return Void 1986 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1987 1988 return result; 1989} 1990 1991bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1992 if (N->getNumValues() != 1) 1993 return false; 1994 if (!N->hasNUsesOfValue(1, 0)) 1995 return false; 1996 1997 SDValue TCChain = Chain; 1998 SDNode *Copy = *N->use_begin(); 1999 if (Copy->getOpcode() == ISD::CopyToReg) { 2000 // If the copy has a glue operand, we conservatively assume it isn't safe to 2001 // perform a tail call. 2002 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2003 return false; 2004 TCChain = Copy->getOperand(0); 2005 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 2006 SDNode *VMov = Copy; 2007 // f64 returned in a pair of GPRs. 2008 SmallPtrSet<SDNode*, 2> Copies; 2009 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2010 UI != UE; ++UI) { 2011 if (UI->getOpcode() != ISD::CopyToReg) 2012 return false; 2013 Copies.insert(*UI); 2014 } 2015 if (Copies.size() > 2) 2016 return false; 2017 2018 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2019 UI != UE; ++UI) { 2020 SDValue UseChain = UI->getOperand(0); 2021 if (Copies.count(UseChain.getNode())) 2022 // Second CopyToReg 2023 Copy = *UI; 2024 else 2025 // First CopyToReg 2026 TCChain = UseChain; 2027 } 2028 } else if (Copy->getOpcode() == ISD::BITCAST) { 2029 // f32 returned in a single GPR. 2030 if (!Copy->hasOneUse()) 2031 return false; 2032 Copy = *Copy->use_begin(); 2033 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2034 return false; 2035 Chain = Copy->getOperand(0); 2036 } else { 2037 return false; 2038 } 2039 2040 bool HasRet = false; 2041 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2042 UI != UE; ++UI) { 2043 if (UI->getOpcode() != ARMISD::RET_FLAG) 2044 return false; 2045 HasRet = true; 2046 } 2047 2048 if (!HasRet) 2049 return false; 2050 2051 Chain = TCChain; 2052 return true; 2053} 2054 2055bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2056 if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) 2057 return false; 2058 2059 if (!CI->isTailCall()) 2060 return false; 2061 2062 return !Subtarget->isThumb1Only(); 2063} 2064 2065// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2066// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2067// one of the above mentioned nodes. It has to be wrapped because otherwise 2068// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2069// be used to form addressing mode. These wrapped nodes will be selected 2070// into MOVi. 
2071static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2072 EVT PtrVT = Op.getValueType(); 2073 // FIXME there is no actual debug info here 2074 DebugLoc dl = Op.getDebugLoc(); 2075 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2076 SDValue Res; 2077 if (CP->isMachineConstantPoolEntry()) 2078 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2079 CP->getAlignment()); 2080 else 2081 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2082 CP->getAlignment()); 2083 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2084} 2085 2086unsigned ARMTargetLowering::getJumpTableEncoding() const { 2087 return MachineJumpTableInfo::EK_Inline; 2088} 2089 2090SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2091 SelectionDAG &DAG) const { 2092 MachineFunction &MF = DAG.getMachineFunction(); 2093 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2094 unsigned ARMPCLabelIndex = 0; 2095 DebugLoc DL = Op.getDebugLoc(); 2096 EVT PtrVT = getPointerTy(); 2097 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2098 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2099 SDValue CPAddr; 2100 if (RelocM == Reloc::Static) { 2101 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2102 } else { 2103 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2104 ARMPCLabelIndex = AFI->createPICLabelUId(); 2105 ARMConstantPoolValue *CPV = 2106 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 2107 ARMCP::CPBlockAddress, PCAdj); 2108 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2109 } 2110 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 2111 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 2112 MachinePointerInfo::getConstantPool(), 2113 false, false, false, 0); 2114 if (RelocM == Reloc::Static) 2115 return Result; 2116 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2117 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 2118} 2119 2120// Lower ISD::GlobalTLSAddress using the "general dynamic" model 2121SDValue 2122ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2123 SelectionDAG &DAG) const { 2124 DebugLoc dl = GA->getDebugLoc(); 2125 EVT PtrVT = getPointerTy(); 2126 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2127 MachineFunction &MF = DAG.getMachineFunction(); 2128 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2129 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2130 ARMConstantPoolValue *CPV = 2131 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2132 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2133 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2134 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2135 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2136 MachinePointerInfo::getConstantPool(), 2137 false, false, false, 0); 2138 SDValue Chain = Argument.getValue(1); 2139 2140 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2141 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2142 2143 // call __tls_get_addr. 2144 ArgListTy Args; 2145 ArgListEntry Entry; 2146 Entry.Node = Argument; 2147 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2148 Args.push_back(Entry); 2149 // FIXME: is there useful debug info available here? 
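  // In the general-dynamic model the value computed above is the address of
  // the variable's (module, offset) descriptor; passing it to __tls_get_addr
  // (an ordinary C call) yields the variable's address in the return register.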
2150 TargetLowering::CallLoweringInfo CLI(Chain, 2151 (Type *) Type::getInt32Ty(*DAG.getContext()), 2152 false, false, false, false, 2153 0, CallingConv::C, /*isTailCall=*/false, 2154 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 2155 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2156 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2157 return CallResult.first; 2158} 2159 2160// Lower ISD::GlobalTLSAddress using the "initial exec" or 2161// "local exec" model. 2162SDValue 2163ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2164 SelectionDAG &DAG, 2165 TLSModel::Model model) const { 2166 const GlobalValue *GV = GA->getGlobal(); 2167 DebugLoc dl = GA->getDebugLoc(); 2168 SDValue Offset; 2169 SDValue Chain = DAG.getEntryNode(); 2170 EVT PtrVT = getPointerTy(); 2171 // Get the Thread Pointer 2172 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2173 2174 if (model == TLSModel::InitialExec) { 2175 MachineFunction &MF = DAG.getMachineFunction(); 2176 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2177 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2178 // Initial exec model. 2179 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2180 ARMConstantPoolValue *CPV = 2181 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2182 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2183 true); 2184 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2185 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2186 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2187 MachinePointerInfo::getConstantPool(), 2188 false, false, false, 0); 2189 Chain = Offset.getValue(1); 2190 2191 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2192 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2193 2194 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2195 MachinePointerInfo::getConstantPool(), 2196 false, false, false, 0); 2197 } else { 2198 // local exec model 2199 assert(model == TLSModel::LocalExec); 2200 ARMConstantPoolValue *CPV = 2201 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2202 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2203 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2204 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2205 MachinePointerInfo::getConstantPool(), 2206 false, false, false, 0); 2207 } 2208 2209 // The address of the thread local variable is the add of the thread 2210 // pointer with the offset of the variable. 
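  // That is, roughly: Addr = ThreadPointer + Offset, where the offset was
  // loaded through the GOT (GOTTPOFF) for initial-exec and is a link-time
  // TPOFF constant from the constant pool for local-exec.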
2211 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2212} 2213 2214SDValue 2215ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2216 // TODO: implement the "local dynamic" model 2217 assert(Subtarget->isTargetELF() && 2218 "TLS not implemented for non-ELF targets"); 2219 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2220 2221 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2222 2223 switch (model) { 2224 case TLSModel::GeneralDynamic: 2225 case TLSModel::LocalDynamic: 2226 return LowerToTLSGeneralDynamicModel(GA, DAG); 2227 case TLSModel::InitialExec: 2228 case TLSModel::LocalExec: 2229 return LowerToTLSExecModels(GA, DAG, model); 2230 } 2231 llvm_unreachable("bogus TLS model"); 2232} 2233 2234SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2235 SelectionDAG &DAG) const { 2236 EVT PtrVT = getPointerTy(); 2237 DebugLoc dl = Op.getDebugLoc(); 2238 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2239 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2240 if (RelocM == Reloc::PIC_) { 2241 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2242 ARMConstantPoolValue *CPV = 2243 ARMConstantPoolConstant::Create(GV, 2244 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2245 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2246 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2247 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2248 CPAddr, 2249 MachinePointerInfo::getConstantPool(), 2250 false, false, false, 0); 2251 SDValue Chain = Result.getValue(1); 2252 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2253 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2254 if (!UseGOTOFF) 2255 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2256 MachinePointerInfo::getGOT(), 2257 false, false, false, 0); 2258 return Result; 2259 } 2260 2261 // If we have T2 ops, we can materialize the address directly via movt/movw 2262 // pair. This is always cheaper. 2263 if (Subtarget->useMovt()) { 2264 ++NumMovwMovt; 2265 // FIXME: Once remat is capable of dealing with instructions with register 2266 // operands, expand this into two nodes. 2267 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2268 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2269 } else { 2270 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2271 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2272 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2273 MachinePointerInfo::getConstantPool(), 2274 false, false, false, 0); 2275 } 2276} 2277 2278SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2279 SelectionDAG &DAG) const { 2280 EVT PtrVT = getPointerTy(); 2281 DebugLoc dl = Op.getDebugLoc(); 2282 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2283 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2284 MachineFunction &MF = DAG.getMachineFunction(); 2285 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2286 2287 // FIXME: Enable this for static codegen when tool issues are fixed. Also 2288 // update ARMFastISel::ARMMaterializeGV. 2289 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2290 ++NumMovwMovt; 2291 // FIXME: Once remat is capable of dealing with instructions with register 2292 // operands, expand this into two nodes. 
2293 if (RelocM == Reloc::Static) 2294 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2295 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2296 2297 unsigned Wrapper = (RelocM == Reloc::PIC_) 2298 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2299 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2300 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2301 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2302 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2303 MachinePointerInfo::getGOT(), 2304 false, false, false, 0); 2305 return Result; 2306 } 2307 2308 unsigned ARMPCLabelIndex = 0; 2309 SDValue CPAddr; 2310 if (RelocM == Reloc::Static) { 2311 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2312 } else { 2313 ARMPCLabelIndex = AFI->createPICLabelUId(); 2314 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2315 ARMConstantPoolValue *CPV = 2316 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2317 PCAdj); 2318 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2319 } 2320 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2321 2322 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2323 MachinePointerInfo::getConstantPool(), 2324 false, false, false, 0); 2325 SDValue Chain = Result.getValue(1); 2326 2327 if (RelocM == Reloc::PIC_) { 2328 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2329 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2330 } 2331 2332 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2333 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2334 false, false, false, 0); 2335 2336 return Result; 2337} 2338 2339SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2340 SelectionDAG &DAG) const { 2341 assert(Subtarget->isTargetELF() && 2342 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2343 MachineFunction &MF = DAG.getMachineFunction(); 2344 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2345 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2346 EVT PtrVT = getPointerTy(); 2347 DebugLoc dl = Op.getDebugLoc(); 2348 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2349 ARMConstantPoolValue *CPV = 2350 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2351 ARMPCLabelIndex, PCAdj); 2352 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2353 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2354 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2355 MachinePointerInfo::getConstantPool(), 2356 false, false, false, 0); 2357 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2358 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2359} 2360 2361SDValue 2362ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2363 DebugLoc dl = Op.getDebugLoc(); 2364 SDValue Val = DAG.getConstant(0, MVT::i32); 2365 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2366 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2367 Op.getOperand(1), Val); 2368} 2369 2370SDValue 2371ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2372 DebugLoc dl = Op.getDebugLoc(); 2373 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2374 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2375} 2376 2377SDValue 2378ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2379 const ARMSubtarget *Subtarget) const { 2380 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2381 DebugLoc dl = Op.getDebugLoc(); 2382 switch (IntNo) { 2383 default: return SDValue(); // Don't custom lower most intrinsics. 2384 case Intrinsic::arm_thread_pointer: { 2385 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2386 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2387 } 2388 case Intrinsic::eh_sjlj_lsda: { 2389 MachineFunction &MF = DAG.getMachineFunction(); 2390 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2391 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2392 EVT PtrVT = getPointerTy(); 2393 DebugLoc dl = Op.getDebugLoc(); 2394 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2395 SDValue CPAddr; 2396 unsigned PCAdj = (RelocM != Reloc::PIC_) 2397 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2398 ARMConstantPoolValue *CPV = 2399 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2400 ARMCP::CPLSDA, PCAdj); 2401 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2402 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2403 SDValue Result = 2404 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2405 MachinePointerInfo::getConstantPool(), 2406 false, false, false, 0); 2407 2408 if (RelocM == Reloc::PIC_) { 2409 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2410 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2411 } 2412 return Result; 2413 } 2414 case Intrinsic::arm_neon_vmulls: 2415 case Intrinsic::arm_neon_vmullu: { 2416 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2417 ? ARMISD::VMULLs : ARMISD::VMULLu; 2418 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2419 Op.getOperand(1), Op.getOperand(2)); 2420 } 2421 } 2422} 2423 2424static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2425 const ARMSubtarget *Subtarget) { 2426 DebugLoc dl = Op.getDebugLoc(); 2427 if (!Subtarget->hasDataBarrier()) { 2428 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2429 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2430 // here. 2431 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2432 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!"); 2433 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2434 DAG.getConstant(0, MVT::i32)); 2435 } 2436 2437 SDValue Op5 = Op.getOperand(5); 2438 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2439 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2440 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2441 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2442 2443 ARM_MB::MemBOpt DMBOpt; 2444 if (isDeviceBarrier) 2445 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2446 else 2447 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2448 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2449 DAG.getConstant(DMBOpt, MVT::i32)); 2450} 2451 2452 2453static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2454 const ARMSubtarget *Subtarget) { 2455 // FIXME: handle "fence singlethread" more efficiently. 2456 DebugLoc dl = Op.getDebugLoc(); 2457 if (!Subtarget->hasDataBarrier()) { 2458 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2459 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2460 // here. 2461 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2462 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2463 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2464 DAG.getConstant(0, MVT::i32)); 2465 } 2466 2467 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2468 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2469} 2470 2471static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2472 const ARMSubtarget *Subtarget) { 2473 // ARM pre v5TE and Thumb1 does not have preload instructions. 2474 if (!(Subtarget->isThumb2() || 2475 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2476 // Just preserve the chain. 2477 return Op.getOperand(0); 2478 2479 DebugLoc dl = Op.getDebugLoc(); 2480 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2481 if (!isRead && 2482 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2483 // ARMv7 with MP extension has PLDW. 2484 return Op.getOperand(0); 2485 2486 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2487 if (Subtarget->isThumb()) { 2488 // Invert the bits. 2489 isRead = ~isRead & 1; 2490 isData = ~isData & 1; 2491 } 2492 2493 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2494 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2495 DAG.getConstant(isData, MVT::i32)); 2496} 2497 2498static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2499 MachineFunction &MF = DAG.getMachineFunction(); 2500 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2501 2502 // vastart just stores the address of the VarArgsFrameIndex slot into the 2503 // memory location argument. 
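  // On ARM the va_list is effectively a single pointer, so this lowers to
  // one pointer-sized store: *va_list = <frame index of the vararg area>.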
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
                                  unsigned &VARegSize, unsigned &VARegSaveSize)
  const {
  unsigned NumGPRs;
  if (CCInfo.isFirstByValRegValid())
    NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
  else {
    unsigned int firstUnalloced;
    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
                                                sizeof(GPRArgRegs) /
                                                sizeof(GPRArgRegs[0]));
    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
  }

  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
  VARegSize = NumGPRs * 4;
  VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
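// For example, in a variadic function whose fixed arguments consume only
// r0, the unallocated registers r1-r3 are spilled into a fixed stack
// object placed adjacent to the incoming stack arguments, so va_arg can
// walk every anonymous argument through one contiguous block of memory.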
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        DebugLoc dl, SDValue &Chain,
                                        const Value *OrigArg,
                                        unsigned OffsetFromOrigArg,
                                        unsigned ArgOffset,
                                        bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned firstRegToSaveIndex;
  if (CCInfo.isFirstByValRegValid())
    firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0;
  else {
    firstRegToSaveIndex = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
  }

  unsigned VARegSize, VARegSaveSize;
  computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
  if (VARegSaveSize) {
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    AFI->setVarArgsRegSaveSize(VARegSaveSize);
    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize,
                                                     ArgOffset + VARegSaveSize
                                                     - VARegSize,
                                                     false));
    SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
                                    getPointerTy());

    SmallVector<SDValue, 4> MemOps;
    for (unsigned i = 0; firstRegToSaveIndex < 4; ++firstRegToSaveIndex, ++i) {
      const TargetRegisterClass *RC;
      if (AFI->isThumb1OnlyFunction())
        RC = &ARM::tGPRRegClass;
      else
        RC = &ARM::GPRRegClass;

      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      SDValue Store =
        DAG.getStore(Val.getValue(1), dl, Val, FIN,
                     MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
                     false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                        DAG.getConstant(4, getPointerTy()));
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  } else
    // This will point to the next argument passed via stack.
    AFI->setVarArgsFrameIndex(
      MFI->CreateFixedObject(4, ArgOffset, !ForceMutable));
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  int lastInsIndex = -1;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
    // Arguments stored in registers.
2664 if (VA.isRegLoc()) { 2665 EVT RegVT = VA.getLocVT(); 2666 2667 if (VA.needsCustom()) { 2668 // f64 and vector types are split up into multiple registers or 2669 // combinations of registers and stack slots. 2670 if (VA.getLocVT() == MVT::v2f64) { 2671 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2672 Chain, DAG, dl); 2673 VA = ArgLocs[++i]; // skip ahead to next loc 2674 SDValue ArgValue2; 2675 if (VA.isMemLoc()) { 2676 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2677 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2678 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2679 MachinePointerInfo::getFixedStack(FI), 2680 false, false, false, 0); 2681 } else { 2682 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2683 Chain, DAG, dl); 2684 } 2685 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2686 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2687 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2688 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2689 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2690 } else 2691 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2692 2693 } else { 2694 const TargetRegisterClass *RC; 2695 2696 if (RegVT == MVT::f32) 2697 RC = &ARM::SPRRegClass; 2698 else if (RegVT == MVT::f64) 2699 RC = &ARM::DPRRegClass; 2700 else if (RegVT == MVT::v2f64) 2701 RC = &ARM::QPRRegClass; 2702 else if (RegVT == MVT::i32) 2703 RC = AFI->isThumb1OnlyFunction() ? 2704 (const TargetRegisterClass*)&ARM::tGPRRegClass : 2705 (const TargetRegisterClass*)&ARM::GPRRegClass; 2706 else 2707 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2708 2709 // Transform the arguments in physical registers into virtual ones. 2710 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2711 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2712 } 2713 2714 // If this is an 8 or 16-bit value, it is really passed promoted 2715 // to 32 bits. Insert an assert[sz]ext to capture this, then 2716 // truncate to the right size. 2717 switch (VA.getLocInfo()) { 2718 default: llvm_unreachable("Unknown loc info!"); 2719 case CCValAssign::Full: break; 2720 case CCValAssign::BCvt: 2721 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2722 break; 2723 case CCValAssign::SExt: 2724 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2725 DAG.getValueType(VA.getValVT())); 2726 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2727 break; 2728 case CCValAssign::ZExt: 2729 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2730 DAG.getValueType(VA.getValVT())); 2731 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2732 break; 2733 } 2734 2735 InVals.push_back(ArgValue); 2736 2737 } else { // VA.isRegLoc() 2738 2739 // sanity check 2740 assert(VA.isMemLoc()); 2741 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2742 2743 int index = ArgLocs[i].getValNo(); 2744 2745 // Some Ins[] entries become multiple ArgLoc[] entries. 2746 // Process them only once. 2747 if (index != lastInsIndex) 2748 { 2749 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2750 // FIXME: For now, all byval parameter objects are marked mutable. 2751 // This can be changed with more analysis. 2752 // In case of tail call optimization mark all arguments mutable. 2753 // Since they could be overwritten by lowering of arguments in case of 2754 // a tail call. 
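        // For a byval formal that was split between registers and memory,
        // the register portion is stored next to the stack portion (via
        // VarArgStyleRegisters below) and the argument becomes a frame index
        // that points at the start of the reassembled aggregate.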
2755 if (Flags.isByVal()) { 2756 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2757 if (!AFI->getVarArgsFrameIndex()) { 2758 VarArgStyleRegisters(CCInfo, DAG, 2759 dl, Chain, CurOrigArg, 2760 Ins[VA.getValNo()].PartOffset, 2761 VA.getLocMemOffset(), 2762 true /*force mutable frames*/); 2763 int VAFrameIndex = AFI->getVarArgsFrameIndex(); 2764 InVals.push_back(DAG.getFrameIndex(VAFrameIndex, getPointerTy())); 2765 } else { 2766 int FI = MFI->CreateFixedObject(Flags.getByValSize(), 2767 VA.getLocMemOffset(), false); 2768 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2769 } 2770 } else { 2771 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2772 VA.getLocMemOffset(), true); 2773 2774 // Create load nodes to retrieve arguments from the stack. 2775 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2776 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2777 MachinePointerInfo::getFixedStack(FI), 2778 false, false, false, 0)); 2779 } 2780 lastInsIndex = index; 2781 } 2782 } 2783 } 2784 2785 // varargs 2786 if (isVarArg) 2787 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0, 0, 2788 CCInfo.getNextStackOffset()); 2789 2790 return Chain; 2791} 2792 2793/// isFloatingPointZero - Return true if this is +0.0. 2794static bool isFloatingPointZero(SDValue Op) { 2795 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2796 return CFP->getValueAPF().isPosZero(); 2797 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2798 // Maybe this has already been legalized into the constant pool? 2799 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2800 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2801 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2802 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2803 return CFP->getValueAPF().isPosZero(); 2804 } 2805 } 2806 return false; 2807} 2808 2809/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2810/// the given operands. 2811SDValue 2812ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2813 SDValue &ARMcc, SelectionDAG &DAG, 2814 DebugLoc dl) const { 2815 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2816 unsigned C = RHSC->getZExtValue(); 2817 if (!isLegalICmpImmediate(C)) { 2818 // Constant does not fit, try adjusting it by one? 2819 switch (CC) { 2820 default: break; 2821 case ISD::SETLT: 2822 case ISD::SETGE: 2823 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2824 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2825 RHS = DAG.getConstant(C-1, MVT::i32); 2826 } 2827 break; 2828 case ISD::SETULT: 2829 case ISD::SETUGE: 2830 if (C != 0 && isLegalICmpImmediate(C-1)) { 2831 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2832 RHS = DAG.getConstant(C-1, MVT::i32); 2833 } 2834 break; 2835 case ISD::SETLE: 2836 case ISD::SETGT: 2837 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2838 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2839 RHS = DAG.getConstant(C+1, MVT::i32); 2840 } 2841 break; 2842 case ISD::SETULE: 2843 case ISD::SETUGT: 2844 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2845 CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 2846 RHS = DAG.getConstant(C+1, MVT::i32); 2847 } 2848 break; 2849 } 2850 } 2851 } 2852 2853 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2854 ARMISD::NodeType CompareType; 2855 switch (CondCode) { 2856 default: 2857 CompareType = ARMISD::CMP; 2858 break; 2859 case ARMCC::EQ: 2860 case ARMCC::NE: 2861 // Uses only Z Flag 2862 CompareType = ARMISD::CMPZ; 2863 break; 2864 } 2865 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2866 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2867} 2868 2869/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 2870SDValue 2871ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2872 DebugLoc dl) const { 2873 SDValue Cmp; 2874 if (!isFloatingPointZero(RHS)) 2875 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2876 else 2877 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2878 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2879} 2880 2881/// duplicateCmp - Glue values can have only one use, so this function 2882/// duplicates a comparison node. 2883SDValue 2884ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2885 unsigned Opc = Cmp.getOpcode(); 2886 DebugLoc DL = Cmp.getDebugLoc(); 2887 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2888 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2889 2890 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2891 Cmp = Cmp.getOperand(0); 2892 Opc = Cmp.getOpcode(); 2893 if (Opc == ARMISD::CMPFP) 2894 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2895 else { 2896 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2897 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2898 } 2899 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2900} 2901 2902SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2903 SDValue Cond = Op.getOperand(0); 2904 SDValue SelectTrue = Op.getOperand(1); 2905 SDValue SelectFalse = Op.getOperand(2); 2906 DebugLoc dl = Op.getDebugLoc(); 2907 2908 // Convert: 2909 // 2910 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2911 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2912 // 2913 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2914 const ConstantSDNode *CMOVTrue = 2915 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2916 const ConstantSDNode *CMOVFalse = 2917 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2918 2919 if (CMOVTrue && CMOVFalse) { 2920 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2921 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2922 2923 SDValue True; 2924 SDValue False; 2925 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2926 True = SelectTrue; 2927 False = SelectFalse; 2928 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2929 True = SelectFalse; 2930 False = SelectTrue; 2931 } 2932 2933 if (True.getNode() && False.getNode()) { 2934 EVT VT = Op.getValueType(); 2935 SDValue ARMcc = Cond.getOperand(2); 2936 SDValue CCR = Cond.getOperand(3); 2937 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2938 assert(True.getValueType() == VT); 2939 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2940 } 2941 } 2942 } 2943 2944 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 2945 // undefined bits before doing a full-word comparison with zero.
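// For instance, with an illustrative i1 condition %c, the masking below gives
//   (select %c, t, f) -> (selectcc (and %c, 1), 0, t, f, setne)
// so only bit 0 of the boolean is ever inspected.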
2946 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 2947 DAG.getConstant(1, Cond.getValueType())); 2948 2949 return DAG.getSelectCC(dl, Cond, 2950 DAG.getConstant(0, Cond.getValueType()), 2951 SelectTrue, SelectFalse, ISD::SETNE); 2952} 2953 2954SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2955 EVT VT = Op.getValueType(); 2956 SDValue LHS = Op.getOperand(0); 2957 SDValue RHS = Op.getOperand(1); 2958 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2959 SDValue TrueVal = Op.getOperand(2); 2960 SDValue FalseVal = Op.getOperand(3); 2961 DebugLoc dl = Op.getDebugLoc(); 2962 2963 if (LHS.getValueType() == MVT::i32) { 2964 SDValue ARMcc; 2965 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2966 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2967 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2968 } 2969 2970 ARMCC::CondCodes CondCode, CondCode2; 2971 FPCCToARMCC(CC, CondCode, CondCode2); 2972 2973 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2974 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2975 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2976 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2977 ARMcc, CCR, Cmp); 2978 if (CondCode2 != ARMCC::AL) { 2979 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2980 // FIXME: Needs another CMP because flag can have but one use. 2981 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2982 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2983 Result, TrueVal, ARMcc2, CCR, Cmp2); 2984 } 2985 return Result; 2986} 2987 2988/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2989/// to morph to an integer compare sequence. 2990static bool canChangeToInt(SDValue Op, bool &SeenZero, 2991 const ARMSubtarget *Subtarget) { 2992 SDNode *N = Op.getNode(); 2993 if (!N->hasOneUse()) 2994 // Otherwise it requires moving the value from fp to integer registers. 2995 return false; 2996 if (!N->getNumValues()) 2997 return false; 2998 EVT VT = Op.getValueType(); 2999 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 3000 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 3001 // vmrs are very slow, e.g. cortex-a8. 
3002 return false; 3003 3004 if (isFloatingPointZero(Op)) { 3005 SeenZero = true; 3006 return true; 3007 } 3008 return ISD::isNormalLoad(N); 3009} 3010 3011static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 3012 if (isFloatingPointZero(Op)) 3013 return DAG.getConstant(0, MVT::i32); 3014 3015 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 3016 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3017 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 3018 Ld->isVolatile(), Ld->isNonTemporal(), 3019 Ld->isInvariant(), Ld->getAlignment()); 3020 3021 llvm_unreachable("Unknown VFP cmp argument!"); 3022} 3023 3024static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 3025 SDValue &RetVal1, SDValue &RetVal2) { 3026 if (isFloatingPointZero(Op)) { 3027 RetVal1 = DAG.getConstant(0, MVT::i32); 3028 RetVal2 = DAG.getConstant(0, MVT::i32); 3029 return; 3030 } 3031 3032 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3033 SDValue Ptr = Ld->getBasePtr(); 3034 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3035 Ld->getChain(), Ptr, 3036 Ld->getPointerInfo(), 3037 Ld->isVolatile(), Ld->isNonTemporal(), 3038 Ld->isInvariant(), Ld->getAlignment()); 3039 3040 EVT PtrType = Ptr.getValueType(); 3041 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3042 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 3043 PtrType, Ptr, DAG.getConstant(4, PtrType)); 3044 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3045 Ld->getChain(), NewPtr, 3046 Ld->getPointerInfo().getWithOffset(4), 3047 Ld->isVolatile(), Ld->isNonTemporal(), 3048 Ld->isInvariant(), NewAlign); 3049 return; 3050 } 3051 3052 llvm_unreachable("Unknown VFP cmp argument!"); 3053} 3054 3055/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3056/// f32 and even f64 comparisons to integer ones. 3057SDValue 3058ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3059 SDValue Chain = Op.getOperand(0); 3060 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3061 SDValue LHS = Op.getOperand(2); 3062 SDValue RHS = Op.getOperand(3); 3063 SDValue Dest = Op.getOperand(4); 3064 DebugLoc dl = Op.getDebugLoc(); 3065 3066 bool LHSSeenZero = false; 3067 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3068 bool RHSSeenZero = false; 3069 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3070 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3071 // If unsafe fp math optimization is enabled and there are no other uses of 3072 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3073 // to an integer comparison. 
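// For example (illustrative value %x), a branch on (setoeq f32 %x, 0.0) is
// turned into an integer compare of (bitcast %x to i32) & 0x7fffffff against
// 0; clearing the sign bit lets both +0.0 and -0.0 compare equal to zero.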
3074 if (CC == ISD::SETOEQ) 3075 CC = ISD::SETEQ; 3076 else if (CC == ISD::SETUNE) 3077 CC = ISD::SETNE; 3078 3079 SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32); 3080 SDValue ARMcc; 3081 if (LHS.getValueType() == MVT::f32) { 3082 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3083 bitcastf32Toi32(LHS, DAG), Mask); 3084 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3085 bitcastf32Toi32(RHS, DAG), Mask); 3086 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3087 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3088 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3089 Chain, Dest, ARMcc, CCR, Cmp); 3090 } 3091 3092 SDValue LHS1, LHS2; 3093 SDValue RHS1, RHS2; 3094 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3095 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3096 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3097 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3098 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3099 ARMcc = DAG.getConstant(CondCode, MVT::i32); 3100 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3101 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3102 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 3103 } 3104 3105 return SDValue(); 3106} 3107 3108SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3109 SDValue Chain = Op.getOperand(0); 3110 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3111 SDValue LHS = Op.getOperand(2); 3112 SDValue RHS = Op.getOperand(3); 3113 SDValue Dest = Op.getOperand(4); 3114 DebugLoc dl = Op.getDebugLoc(); 3115 3116 if (LHS.getValueType() == MVT::i32) { 3117 SDValue ARMcc; 3118 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3119 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3120 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3121 Chain, Dest, ARMcc, CCR, Cmp); 3122 } 3123 3124 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 3125 3126 if (getTargetMachine().Options.UnsafeFPMath && 3127 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 3128 CC == ISD::SETNE || CC == ISD::SETUNE)) { 3129 SDValue Result = OptimizeVFPBrcond(Op, DAG); 3130 if (Result.getNode()) 3131 return Result; 3132 } 3133 3134 ARMCC::CondCodes CondCode, CondCode2; 3135 FPCCToARMCC(CC, CondCode, CondCode2); 3136 3137 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3138 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3139 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3140 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3141 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 3142 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3143 if (CondCode2 != ARMCC::AL) { 3144 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 3145 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 3146 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3147 } 3148 return Res; 3149} 3150 3151SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 3152 SDValue Chain = Op.getOperand(0); 3153 SDValue Table = Op.getOperand(1); 3154 SDValue Index = Op.getOperand(2); 3155 DebugLoc dl = Op.getDebugLoc(); 3156 3157 EVT PTy = getPointerTy(); 3158 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 3159 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3160 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 3161 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3162 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 3163 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, 
PTy)); 3164 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3165 if (Subtarget->isThumb2()) { 3166 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 3167 // which does another jump to the destination. This also makes it easier 3168 // to translate it to TBB / TBH later. 3169 // FIXME: This might not work if the function is extremely large. 3170 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3171 Addr, Op.getOperand(2), JTI, UId); 3172 } 3173 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3174 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3175 MachinePointerInfo::getJumpTable(), 3176 false, false, false, 0); 3177 Chain = Addr.getValue(1); 3178 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3179 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3180 } else { 3181 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3182 MachinePointerInfo::getJumpTable(), 3183 false, false, false, 0); 3184 Chain = Addr.getValue(1); 3185 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3186 } 3187} 3188 3189static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3190 EVT VT = Op.getValueType(); 3191 DebugLoc dl = Op.getDebugLoc(); 3192 3193 if (Op.getValueType().getVectorElementType() == MVT::i32) { 3194 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 3195 return Op; 3196 return DAG.UnrollVectorOp(Op.getNode()); 3197 } 3198 3199 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 3200 "Invalid type for custom lowering!"); 3201 if (VT != MVT::v4i16) 3202 return DAG.UnrollVectorOp(Op.getNode()); 3203 3204 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 3205 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 3206} 3207 3208static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3209 EVT VT = Op.getValueType(); 3210 if (VT.isVector()) 3211 return LowerVectorFP_TO_INT(Op, DAG); 3212 3213 DebugLoc dl = Op.getDebugLoc(); 3214 unsigned Opc; 3215 3216 switch (Op.getOpcode()) { 3217 default: llvm_unreachable("Invalid opcode!"); 3218 case ISD::FP_TO_SINT: 3219 Opc = ARMISD::FTOSI; 3220 break; 3221 case ISD::FP_TO_UINT: 3222 Opc = ARMISD::FTOUI; 3223 break; 3224 } 3225 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3226 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3227} 3228 3229static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3230 EVT VT = Op.getValueType(); 3231 DebugLoc dl = Op.getDebugLoc(); 3232 3233 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 3234 if (VT.getVectorElementType() == MVT::f32) 3235 return Op; 3236 return DAG.UnrollVectorOp(Op.getNode()); 3237 } 3238 3239 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3240 "Invalid type for custom lowering!"); 3241 if (VT != MVT::v4f32) 3242 return DAG.UnrollVectorOp(Op.getNode()); 3243 3244 unsigned CastOpc; 3245 unsigned Opc; 3246 switch (Op.getOpcode()) { 3247 default: llvm_unreachable("Invalid opcode!"); 3248 case ISD::SINT_TO_FP: 3249 CastOpc = ISD::SIGN_EXTEND; 3250 Opc = ISD::SINT_TO_FP; 3251 break; 3252 case ISD::UINT_TO_FP: 3253 CastOpc = ISD::ZERO_EXTEND; 3254 Opc = ISD::UINT_TO_FP; 3255 break; 3256 } 3257 3258 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3259 return DAG.getNode(Opc, dl, VT, Op); 3260} 3261 3262static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3263 EVT VT = Op.getValueType(); 3264 if (VT.isVector()) 3265 return LowerVectorINT_TO_FP(Op, DAG); 3266 3267 DebugLoc dl = 
Op.getDebugLoc(); 3268 unsigned Opc; 3269 3270 switch (Op.getOpcode()) { 3271 default: llvm_unreachable("Invalid opcode!"); 3272 case ISD::SINT_TO_FP: 3273 Opc = ARMISD::SITOF; 3274 break; 3275 case ISD::UINT_TO_FP: 3276 Opc = ARMISD::UITOF; 3277 break; 3278 } 3279 3280 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3281 return DAG.getNode(Opc, dl, VT, Op); 3282} 3283 3284SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3285 // Implement fcopysign with a fabs and a conditional fneg. 3286 SDValue Tmp0 = Op.getOperand(0); 3287 SDValue Tmp1 = Op.getOperand(1); 3288 DebugLoc dl = Op.getDebugLoc(); 3289 EVT VT = Op.getValueType(); 3290 EVT SrcVT = Tmp1.getValueType(); 3291 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3292 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3293 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3294 3295 if (UseNEON) { 3296 // Use VBSL to copy the sign bit. 3297 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3298 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3299 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3300 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3301 if (VT == MVT::f64) 3302 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3303 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3304 DAG.getConstant(32, MVT::i32)); 3305 else /*if (VT == MVT::f32)*/ 3306 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3307 if (SrcVT == MVT::f32) { 3308 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3309 if (VT == MVT::f64) 3310 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3311 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3312 DAG.getConstant(32, MVT::i32)); 3313 } else if (VT == MVT::f32) 3314 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3315 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3316 DAG.getConstant(32, MVT::i32)); 3317 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3318 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3319 3320 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3321 MVT::i32); 3322 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3323 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3324 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3325 3326 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3327 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3328 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3329 if (VT == MVT::f32) { 3330 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3331 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3332 DAG.getConstant(0, MVT::i32)); 3333 } else { 3334 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3335 } 3336 3337 return Res; 3338 } 3339 3340 // Bitcast operand 1 to i32. 3341 if (SrcVT == MVT::f64) 3342 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3343 &Tmp1, 1).getValue(1); 3344 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3345 3346 // Or in the signbit with integer operations. 3347 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3348 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3349 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3350 if (VT == MVT::f32) { 3351 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3352 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3353 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3354 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3355 } 3356 3357 // f64: Or the high part with signbit and then combine two parts. 
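// That is, with the f64 split into Lo/Hi i32 words, the code below produces
//   VMOVDRR(Lo, (Hi & 0x7fffffff) | Tmp1)
// where Tmp1 already holds just the sign bit of the second operand, so only
// bit 31 of the high word changes.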
3358 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3359 &Tmp0, 1); 3360 SDValue Lo = Tmp0.getValue(0); 3361 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3362 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3363 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3364} 3365 3366SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3367 MachineFunction &MF = DAG.getMachineFunction(); 3368 MachineFrameInfo *MFI = MF.getFrameInfo(); 3369 MFI->setReturnAddressIsTaken(true); 3370 3371 EVT VT = Op.getValueType(); 3372 DebugLoc dl = Op.getDebugLoc(); 3373 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3374 if (Depth) { 3375 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3376 SDValue Offset = DAG.getConstant(4, MVT::i32); 3377 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3378 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3379 MachinePointerInfo(), false, false, false, 0); 3380 } 3381 3382 // Return LR, which contains the return address. Mark it an implicit live-in. 3383 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3384 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3385} 3386 3387SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3388 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3389 MFI->setFrameAddressIsTaken(true); 3390 3391 EVT VT = Op.getValueType(); 3392 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3393 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3394 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3395 ? ARM::R7 : ARM::R11; 3396 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3397 while (Depth--) 3398 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3399 MachinePointerInfo(), 3400 false, false, false, 0); 3401 return FrameAddr; 3402} 3403 3404/// ExpandBITCAST - If the target supports VFP, this function is called to 3405/// expand a bit convert where either the source or destination type is i64 to 3406/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3407/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3408/// vectors), since the legalizer won't know what to do with that. 3409static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3410 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3411 DebugLoc dl = N->getDebugLoc(); 3412 SDValue Op = N->getOperand(0); 3413 3414 // This function is only supposed to be called for i64 types, either as the 3415 // source or destination of the bit convert. 3416 EVT SrcVT = Op.getValueType(); 3417 EVT DstVT = N->getValueType(0); 3418 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3419 "ExpandBITCAST called for non-i64 type"); 3420 3421 // Turn i64->f64 into VMOVDRR. 3422 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3423 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3424 DAG.getConstant(0, MVT::i32)); 3425 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3426 DAG.getConstant(1, MVT::i32)); 3427 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3428 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3429 } 3430 3431 // Turn f64->i64 into VMOVRRD. 
3432 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3433 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3434 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3435 // Merge the pieces into a single i64 value. 3436 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3437 } 3438 3439 return SDValue(); 3440} 3441 3442/// getZeroVector - Returns a vector of specified type with all zero elements. 3443/// Zero vectors are used to represent vector negation and in those cases 3444/// will be implemented with the NEON VNEG instruction. However, VNEG does 3445/// not support i64 elements, so sometimes the zero vectors will need to be 3446/// explicitly constructed. Regardless, use a canonical VMOV to create the 3447/// zero vector. 3448static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3449 assert(VT.isVector() && "Expected a vector type"); 3450 // The canonical modified immediate encoding of a zero vector is....0! 3451 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3452 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3453 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3454 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3455} 3456 3457/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3458/// i32 values and takes a 2 x i32 value to shift plus a shift amount. 3459SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3460 SelectionDAG &DAG) const { 3461 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3462 EVT VT = Op.getValueType(); 3463 unsigned VTBits = VT.getSizeInBits(); 3464 DebugLoc dl = Op.getDebugLoc(); 3465 SDValue ShOpLo = Op.getOperand(0); 3466 SDValue ShOpHi = Op.getOperand(1); 3467 SDValue ShAmt = Op.getOperand(2); 3468 SDValue ARMcc; 3469 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3470 3471 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3472 3473 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3474 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3475 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3476 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3477 DAG.getConstant(VTBits, MVT::i32)); 3478 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3479 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3480 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3481 3482 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3483 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3484 ARMcc, DAG, dl); 3485 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3486 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3487 CCR, Cmp); 3488 3489 SDValue Ops[2] = { Lo, Hi }; 3490 return DAG.getMergeValues(Ops, 2, dl); 3491} 3492 3493/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3494/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
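/// Conceptually, for a shift amount Amt applied to the 2 x i32 value {Hi,Lo}:
///   Amt < 32:  Hi' = (Hi << Amt) | (Lo >> (32 - Amt))
///   Amt >= 32: Hi' = Lo << (Amt - 32)
/// The low word is always Lo << Amt (ARM register shifts of 32 or more yield
/// 0), and a CMOV on (Amt - 32) >= 0 selects the correct high word.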
3495SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3496 SelectionDAG &DAG) const { 3497 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3498 EVT VT = Op.getValueType(); 3499 unsigned VTBits = VT.getSizeInBits(); 3500 DebugLoc dl = Op.getDebugLoc(); 3501 SDValue ShOpLo = Op.getOperand(0); 3502 SDValue ShOpHi = Op.getOperand(1); 3503 SDValue ShAmt = Op.getOperand(2); 3504 SDValue ARMcc; 3505 3506 assert(Op.getOpcode() == ISD::SHL_PARTS); 3507 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3508 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3509 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3510 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3511 DAG.getConstant(VTBits, MVT::i32)); 3512 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3513 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3514 3515 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3516 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3517 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3518 ARMcc, DAG, dl); 3519 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3520 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3521 CCR, Cmp); 3522 3523 SDValue Ops[2] = { Lo, Hi }; 3524 return DAG.getMergeValues(Ops, 2, dl); 3525} 3526 3527SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3528 SelectionDAG &DAG) const { 3529 // The rounding mode is in bits 23:22 of the FPSCR. 3530 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3531 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3532 // so that the shift + and get folded into a bitfield extract. 3533 DebugLoc dl = Op.getDebugLoc(); 3534 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3535 DAG.getConstant(Intrinsic::arm_get_fpscr, 3536 MVT::i32)); 3537 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3538 DAG.getConstant(1U << 22, MVT::i32)); 3539 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3540 DAG.getConstant(22, MVT::i32)); 3541 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3542 DAG.getConstant(3, MVT::i32)); 3543} 3544 3545static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3546 const ARMSubtarget *ST) { 3547 EVT VT = N->getValueType(0); 3548 DebugLoc dl = N->getDebugLoc(); 3549 3550 if (!ST->hasV6T2Ops()) 3551 return SDValue(); 3552 3553 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3554 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3555} 3556 3557static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3558 const ARMSubtarget *ST) { 3559 EVT VT = N->getValueType(0); 3560 DebugLoc dl = N->getDebugLoc(); 3561 3562 if (!VT.isVector()) 3563 return SDValue(); 3564 3565 // Lower vector shifts on NEON to use VSHL. 3566 assert(ST->hasNEON() && "unexpected vector shift"); 3567 3568 // Left shifts translate directly to the vshiftu intrinsic. 3569 if (N->getOpcode() == ISD::SHL) 3570 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3571 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3572 N->getOperand(0), N->getOperand(1)); 3573 3574 assert((N->getOpcode() == ISD::SRA || 3575 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3576 3577 // NEON uses the same intrinsics for both left and right shifts. For 3578 // right shifts, the shift amounts are negative, so negate the vector of 3579 // shift amounts. 
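// E.g. (srl <4 x i32> %v, %amt) becomes (with illustrative operand names)
//   @llvm.arm.neon.vshiftu(%v, (sub zeroinitializer, %amt))
// and sra uses @llvm.arm.neon.vshifts in the same way.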
3580 EVT ShiftVT = N->getOperand(1).getValueType(); 3581 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3582 getZeroVector(ShiftVT, DAG, dl), 3583 N->getOperand(1)); 3584 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3585 Intrinsic::arm_neon_vshifts : 3586 Intrinsic::arm_neon_vshiftu); 3587 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3588 DAG.getConstant(vshiftInt, MVT::i32), 3589 N->getOperand(0), NegatedCount); 3590} 3591 3592static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3593 const ARMSubtarget *ST) { 3594 EVT VT = N->getValueType(0); 3595 DebugLoc dl = N->getDebugLoc(); 3596 3597 // We can get here for a node like i32 = ISD::SHL i32, i64 3598 if (VT != MVT::i64) 3599 return SDValue(); 3600 3601 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3602 "Unknown shift to lower!"); 3603 3604 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3605 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3606 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3607 return SDValue(); 3608 3609 // If we are in thumb mode, we don't have RRX. 3610 if (ST->isThumb1Only()) return SDValue(); 3611 3612 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3613 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3614 DAG.getConstant(0, MVT::i32)); 3615 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3616 DAG.getConstant(1, MVT::i32)); 3617 3618 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3619 // captures the result into a carry flag. 3620 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3621 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3622 3623 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3624 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3625 3626 // Merge the pieces into a single i64 value. 3627 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3628} 3629 3630static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3631 SDValue TmpOp0, TmpOp1; 3632 bool Invert = false; 3633 bool Swap = false; 3634 unsigned Opc = 0; 3635 3636 SDValue Op0 = Op.getOperand(0); 3637 SDValue Op1 = Op.getOperand(1); 3638 SDValue CC = Op.getOperand(2); 3639 EVT VT = Op.getValueType(); 3640 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3641 DebugLoc dl = Op.getDebugLoc(); 3642 3643 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3644 switch (SetCCOpcode) { 3645 default: llvm_unreachable("Illegal FP comparison"); 3646 case ISD::SETUNE: 3647 case ISD::SETNE: Invert = true; // Fallthrough 3648 case ISD::SETOEQ: 3649 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3650 case ISD::SETOLT: 3651 case ISD::SETLT: Swap = true; // Fallthrough 3652 case ISD::SETOGT: 3653 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3654 case ISD::SETOLE: 3655 case ISD::SETLE: Swap = true; // Fallthrough 3656 case ISD::SETOGE: 3657 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3658 case ISD::SETUGE: Swap = true; // Fallthrough 3659 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3660 case ISD::SETUGT: Swap = true; // Fallthrough 3661 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3662 case ISD::SETUEQ: Invert = true; // Fallthrough 3663 case ISD::SETONE: 3664 // Expand this to (OLT | OGT). 
3665 TmpOp0 = Op0; 3666 TmpOp1 = Op1; 3667 Opc = ISD::OR; 3668 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3669 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3670 break; 3671 case ISD::SETUO: Invert = true; // Fallthrough 3672 case ISD::SETO: 3673 // Expand this to (OLT | OGE). 3674 TmpOp0 = Op0; 3675 TmpOp1 = Op1; 3676 Opc = ISD::OR; 3677 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3678 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3679 break; 3680 } 3681 } else { 3682 // Integer comparisons. 3683 switch (SetCCOpcode) { 3684 default: llvm_unreachable("Illegal integer comparison"); 3685 case ISD::SETNE: Invert = true; 3686 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3687 case ISD::SETLT: Swap = true; 3688 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3689 case ISD::SETLE: Swap = true; 3690 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3691 case ISD::SETULT: Swap = true; 3692 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3693 case ISD::SETULE: Swap = true; 3694 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3695 } 3696 3697 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3698 if (Opc == ARMISD::VCEQ) { 3699 3700 SDValue AndOp; 3701 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3702 AndOp = Op0; 3703 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3704 AndOp = Op1; 3705 3706 // Ignore bitconvert. 3707 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3708 AndOp = AndOp.getOperand(0); 3709 3710 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3711 Opc = ARMISD::VTST; 3712 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3713 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3714 Invert = !Invert; 3715 } 3716 } 3717 } 3718 3719 if (Swap) 3720 std::swap(Op0, Op1); 3721 3722 // If one of the operands is a constant vector zero, attempt to fold the 3723 // comparison to a specialized compare-against-zero form. 3724 SDValue SingleOp; 3725 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3726 SingleOp = Op0; 3727 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3728 if (Opc == ARMISD::VCGE) 3729 Opc = ARMISD::VCLEZ; 3730 else if (Opc == ARMISD::VCGT) 3731 Opc = ARMISD::VCLTZ; 3732 SingleOp = Op1; 3733 } 3734 3735 SDValue Result; 3736 if (SingleOp.getNode()) { 3737 switch (Opc) { 3738 case ARMISD::VCEQ: 3739 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3740 case ARMISD::VCGE: 3741 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3742 case ARMISD::VCLEZ: 3743 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3744 case ARMISD::VCGT: 3745 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3746 case ARMISD::VCLTZ: 3747 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3748 default: 3749 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3750 } 3751 } else { 3752 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3753 } 3754 3755 if (Invert) 3756 Result = DAG.getNOT(dl, Result, VT); 3757 3758 return Result; 3759} 3760 3761/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3762/// valid vector constant for a NEON instruction with a "modified immediate" 3763/// operand (e.g., VMOV). If so, return the encoded value. 
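/// For example, a 32-bit splat of 0x0000ab00 (illustrative) qualifies: only
/// the second byte is nonzero, so it is encoded below with Cmode=001x and
/// Imm=0xab.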
3764static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3765 unsigned SplatBitSize, SelectionDAG &DAG, 3766 EVT &VT, bool is128Bits, NEONModImmType type) { 3767 unsigned OpCmode, Imm; 3768 3769 // SplatBitSize is set to the smallest size that splats the vector, so a 3770 // zero vector will always have SplatBitSize == 8. However, NEON modified 3771 // immediate instructions other than VMOV do not support the 8-bit encoding 3772 // of a zero vector, and the default encoding of zero is supposed to be the 3773 // 32-bit version. 3774 if (SplatBits == 0) 3775 SplatBitSize = 32; 3776 3777 switch (SplatBitSize) { 3778 case 8: 3779 if (type != VMOVModImm) 3780 return SDValue(); 3781 // Any 1-byte value is OK. Op=0, Cmode=1110. 3782 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3783 OpCmode = 0xe; 3784 Imm = SplatBits; 3785 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3786 break; 3787 3788 case 16: 3789 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3790 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3791 if ((SplatBits & ~0xff) == 0) { 3792 // Value = 0x00nn: Op=x, Cmode=100x. 3793 OpCmode = 0x8; 3794 Imm = SplatBits; 3795 break; 3796 } 3797 if ((SplatBits & ~0xff00) == 0) { 3798 // Value = 0xnn00: Op=x, Cmode=101x. 3799 OpCmode = 0xa; 3800 Imm = SplatBits >> 8; 3801 break; 3802 } 3803 return SDValue(); 3804 3805 case 32: 3806 // NEON's 32-bit VMOV supports splat values where: 3807 // * only one byte is nonzero, or 3808 // * the least significant byte is 0xff and the second byte is nonzero, or 3809 // * the least significant 2 bytes are 0xff and the third is nonzero. 3810 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3811 if ((SplatBits & ~0xff) == 0) { 3812 // Value = 0x000000nn: Op=x, Cmode=000x. 3813 OpCmode = 0; 3814 Imm = SplatBits; 3815 break; 3816 } 3817 if ((SplatBits & ~0xff00) == 0) { 3818 // Value = 0x0000nn00: Op=x, Cmode=001x. 3819 OpCmode = 0x2; 3820 Imm = SplatBits >> 8; 3821 break; 3822 } 3823 if ((SplatBits & ~0xff0000) == 0) { 3824 // Value = 0x00nn0000: Op=x, Cmode=010x. 3825 OpCmode = 0x4; 3826 Imm = SplatBits >> 16; 3827 break; 3828 } 3829 if ((SplatBits & ~0xff000000) == 0) { 3830 // Value = 0xnn000000: Op=x, Cmode=011x. 3831 OpCmode = 0x6; 3832 Imm = SplatBits >> 24; 3833 break; 3834 } 3835 3836 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3837 if (type == OtherModImm) return SDValue(); 3838 3839 if ((SplatBits & ~0xffff) == 0 && 3840 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3841 // Value = 0x0000nnff: Op=x, Cmode=1100. 3842 OpCmode = 0xc; 3843 Imm = SplatBits >> 8; 3844 SplatBits |= 0xff; 3845 break; 3846 } 3847 3848 if ((SplatBits & ~0xffffff) == 0 && 3849 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3850 // Value = 0x00nnffff: Op=x, Cmode=1101. 3851 OpCmode = 0xd; 3852 Imm = SplatBits >> 16; 3853 SplatBits |= 0xffff; 3854 break; 3855 } 3856 3857 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3858 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3859 // VMOV.I32. A (very) minor optimization would be to replicate the value 3860 // and fall through here to test for a valid 64-bit splat. But, then the 3861 // caller would also need to check and handle the change in size. 3862 return SDValue(); 3863 3864 case 64: { 3865 if (type != VMOVModImm) 3866 return SDValue(); 3867 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
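// E.g. the splat 0x00ff00ff00ff00ff is accepted: bytes 0, 2, 4 and 6 are
// 0xff, so the loop below sets Imm bits 0, 2, 4 and 6, giving Imm = 0x55.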
3868 uint64_t BitMask = 0xff; 3869 uint64_t Val = 0; 3870 unsigned ImmMask = 1; 3871 Imm = 0; 3872 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3873 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3874 Val |= BitMask; 3875 Imm |= ImmMask; 3876 } else if ((SplatBits & BitMask) != 0) { 3877 return SDValue(); 3878 } 3879 BitMask <<= 8; 3880 ImmMask <<= 1; 3881 } 3882 // Op=1, Cmode=1110. 3883 OpCmode = 0x1e; 3884 SplatBits = Val; 3885 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3886 break; 3887 } 3888 3889 default: 3890 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3891 } 3892 3893 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3894 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3895} 3896 3897SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 3898 const ARMSubtarget *ST) const { 3899 if (!ST->useNEONForSinglePrecisionFP() || !ST->hasVFP3() || ST->hasD16()) 3900 return SDValue(); 3901 3902 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 3903 assert(Op.getValueType() == MVT::f32 && 3904 "ConstantFP custom lowering should only occur for f32."); 3905 3906 // Try splatting with a VMOV.f32... 3907 APFloat FPVal = CFP->getValueAPF(); 3908 int ImmVal = ARM_AM::getFP32Imm(FPVal); 3909 if (ImmVal != -1) { 3910 DebugLoc DL = Op.getDebugLoc(); 3911 SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32); 3912 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 3913 NewVal); 3914 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 3915 DAG.getConstant(0, MVT::i32)); 3916 } 3917 3918 // If that fails, try a VMOV.i32 3919 EVT VMovVT; 3920 unsigned iVal = FPVal.bitcastToAPInt().getZExtValue(); 3921 SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false, 3922 VMOVModImm); 3923 if (NewVal != SDValue()) { 3924 DebugLoc DL = Op.getDebugLoc(); 3925 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 3926 NewVal); 3927 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 3928 VecConstant); 3929 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 3930 DAG.getConstant(0, MVT::i32)); 3931 } 3932 3933 // Finally, try a VMVN.i32 3934 NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false, 3935 VMVNModImm); 3936 if (NewVal != SDValue()) { 3937 DebugLoc DL = Op.getDebugLoc(); 3938 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 3939 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 3940 VecConstant); 3941 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 3942 DAG.getConstant(0, MVT::i32)); 3943 } 3944 3945 return SDValue(); 3946} 3947 3948// Check if a VEXT instruction can handle the shuffle mask when the 3949// vector sources of the shuffle are the same. 3950static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 3951 unsigned NumElts = VT.getVectorNumElements(); 3952 3953 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3954 if (M[0] < 0) 3955 return false; 3956 3957 Imm = M[0]; 3958 3959 // If this is a VEXT shuffle, the immediate value is the index of the first 3960 // element. The other shuffle indices must be the successive elements after 3961 // the first one. 3962 unsigned ExpectedElt = Imm; 3963 for (unsigned i = 1; i < NumElts; ++i) { 3964 // Increment the expected index. If it wraps around, just follow it 3965 // back to index zero and keep going.
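// E.g. for a single-source <8 x i8> VEXT with Imm = M[0] = 3 the accepted
// mask is <3, 4, 5, 6, 7, 0, 1, 2>, with UNDEF allowed in any position
// after the first.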
3966 ++ExpectedElt; 3967 if (ExpectedElt == NumElts) 3968 ExpectedElt = 0; 3969 3970 if (M[i] < 0) continue; // ignore UNDEF indices 3971 if (ExpectedElt != static_cast<unsigned>(M[i])) 3972 return false; 3973 } 3974 3975 return true; 3976} 3977 3978 3979static bool isVEXTMask(ArrayRef<int> M, EVT VT, 3980 bool &ReverseVEXT, unsigned &Imm) { 3981 unsigned NumElts = VT.getVectorNumElements(); 3982 ReverseVEXT = false; 3983 3984 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3985 if (M[0] < 0) 3986 return false; 3987 3988 Imm = M[0]; 3989 3990 // If this is a VEXT shuffle, the immediate value is the index of the first 3991 // element. The other shuffle indices must be the successive elements after 3992 // the first one. 3993 unsigned ExpectedElt = Imm; 3994 for (unsigned i = 1; i < NumElts; ++i) { 3995 // Increment the expected index. If it wraps around, it may still be 3996 // a VEXT but the source vectors must be swapped. 3997 ExpectedElt += 1; 3998 if (ExpectedElt == NumElts * 2) { 3999 ExpectedElt = 0; 4000 ReverseVEXT = true; 4001 } 4002 4003 if (M[i] < 0) continue; // ignore UNDEF indices 4004 if (ExpectedElt != static_cast<unsigned>(M[i])) 4005 return false; 4006 } 4007 4008 // Adjust the index value if the source operands will be swapped. 4009 if (ReverseVEXT) 4010 Imm -= NumElts; 4011 4012 return true; 4013} 4014 4015/// isVREVMask - Check if a vector shuffle corresponds to a VREV 4016/// instruction with the specified blocksize. (The order of the elements 4017/// within each block of the vector is reversed.) 4018static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 4019 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 4020 "Only possible block sizes for VREV are: 16, 32, 64"); 4021 4022 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4023 if (EltSz == 64) 4024 return false; 4025 4026 unsigned NumElts = VT.getVectorNumElements(); 4027 unsigned BlockElts = M[0] + 1; 4028 // If the first shuffle index is UNDEF, be optimistic. 4029 if (M[0] < 0) 4030 BlockElts = BlockSize / EltSz; 4031 4032 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 4033 return false; 4034 4035 for (unsigned i = 0; i < NumElts; ++i) { 4036 if (M[i] < 0) continue; // ignore UNDEF indices 4037 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 4038 return false; 4039 } 4040 4041 return true; 4042} 4043 4044static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 4045 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 4046 // range, then 0 is placed into the resulting vector. So pretty much any mask 4047 // of 8 elements can work here. 4048 return VT == MVT::v8i8 && M.size() == 8; 4049} 4050 4051static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4052 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4053 if (EltSz == 64) 4054 return false; 4055 4056 unsigned NumElts = VT.getVectorNumElements(); 4057 WhichResult = (M[0] == 0 ? 0 : 1); 4058 for (unsigned i = 0; i < NumElts; i += 2) { 4059 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4060 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 4061 return false; 4062 } 4063 return true; 4064} 4065 4066/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 4067/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4068/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 
4069static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4070 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4071 if (EltSz == 64) 4072 return false; 4073 4074 unsigned NumElts = VT.getVectorNumElements(); 4075 WhichResult = (M[0] == 0 ? 0 : 1); 4076 for (unsigned i = 0; i < NumElts; i += 2) { 4077 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4078 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 4079 return false; 4080 } 4081 return true; 4082} 4083 4084static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4085 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4086 if (EltSz == 64) 4087 return false; 4088 4089 unsigned NumElts = VT.getVectorNumElements(); 4090 WhichResult = (M[0] == 0 ? 0 : 1); 4091 for (unsigned i = 0; i != NumElts; ++i) { 4092 if (M[i] < 0) continue; // ignore UNDEF indices 4093 if ((unsigned) M[i] != 2 * i + WhichResult) 4094 return false; 4095 } 4096 4097 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4098 if (VT.is64BitVector() && EltSz == 32) 4099 return false; 4100 4101 return true; 4102} 4103 4104/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 4105/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4106/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 4107static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4108 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4109 if (EltSz == 64) 4110 return false; 4111 4112 unsigned Half = VT.getVectorNumElements() / 2; 4113 WhichResult = (M[0] == 0 ? 0 : 1); 4114 for (unsigned j = 0; j != 2; ++j) { 4115 unsigned Idx = WhichResult; 4116 for (unsigned i = 0; i != Half; ++i) { 4117 int MIdx = M[i + j * Half]; 4118 if (MIdx >= 0 && (unsigned) MIdx != Idx) 4119 return false; 4120 Idx += 2; 4121 } 4122 } 4123 4124 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4125 if (VT.is64BitVector() && EltSz == 32) 4126 return false; 4127 4128 return true; 4129} 4130 4131static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4132 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4133 if (EltSz == 64) 4134 return false; 4135 4136 unsigned NumElts = VT.getVectorNumElements(); 4137 WhichResult = (M[0] == 0 ? 0 : 1); 4138 unsigned Idx = WhichResult * NumElts / 2; 4139 for (unsigned i = 0; i != NumElts; i += 2) { 4140 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4141 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 4142 return false; 4143 Idx += 1; 4144 } 4145 4146 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4147 if (VT.is64BitVector() && EltSz == 32) 4148 return false; 4149 4150 return true; 4151} 4152 4153/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 4154/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4155/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 4156static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4157 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4158 if (EltSz == 64) 4159 return false; 4160 4161 unsigned NumElts = VT.getVectorNumElements(); 4162 WhichResult = (M[0] == 0 ? 
0 : 1); 4163 unsigned Idx = WhichResult * NumElts / 2; 4164 for (unsigned i = 0; i != NumElts; i += 2) { 4165 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4166 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 4167 return false; 4168 Idx += 1; 4169 } 4170 4171 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4172 if (VT.is64BitVector() && EltSz == 32) 4173 return false; 4174 4175 return true; 4176} 4177 4178// If N is an integer constant that can be moved into a register in one 4179// instruction, return an SDValue of such a constant (will become a MOV 4180// instruction). Otherwise return null. 4181static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 4182 const ARMSubtarget *ST, DebugLoc dl) { 4183 uint64_t Val; 4184 if (!isa<ConstantSDNode>(N)) 4185 return SDValue(); 4186 Val = cast<ConstantSDNode>(N)->getZExtValue(); 4187 4188 if (ST->isThumb1Only()) { 4189 if (Val <= 255 || ~Val <= 255) 4190 return DAG.getConstant(Val, MVT::i32); 4191 } else { 4192 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 4193 return DAG.getConstant(Val, MVT::i32); 4194 } 4195 return SDValue(); 4196} 4197 4198// If this is a case we can't handle, return null and let the default 4199// expansion code take care of it. 4200SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 4201 const ARMSubtarget *ST) const { 4202 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 4203 DebugLoc dl = Op.getDebugLoc(); 4204 EVT VT = Op.getValueType(); 4205 4206 APInt SplatBits, SplatUndef; 4207 unsigned SplatBitSize; 4208 bool HasAnyUndefs; 4209 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4210 if (SplatBitSize <= 64) { 4211 // Check if an immediate VMOV works. 4212 EVT VmovVT; 4213 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4214 SplatUndef.getZExtValue(), SplatBitSize, 4215 DAG, VmovVT, VT.is128BitVector(), 4216 VMOVModImm); 4217 if (Val.getNode()) { 4218 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 4219 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4220 } 4221 4222 // Try an immediate VMVN. 4223 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 4224 Val = isNEONModifiedImm(NegatedImm, 4225 SplatUndef.getZExtValue(), SplatBitSize, 4226 DAG, VmovVT, VT.is128BitVector(), 4227 VMVNModImm); 4228 if (Val.getNode()) { 4229 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 4230 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4231 } 4232 4233 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 4234 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 4235 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 4236 if (ImmVal != -1) { 4237 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 4238 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 4239 } 4240 } 4241 } 4242 } 4243 4244 // Scan through the operands to see if only one value is used. 4245 // 4246 // As an optimisation, even if more than one value is used it may be more 4247 // profitable to splat with one value then change some lanes. 4248 // 4249 // Heuristically we decide to do this if the vector has a "dominant" value, 4250 // defined as splatted to more than half of the lanes. 4251 unsigned NumElts = VT.getVectorNumElements(); 4252 bool isOnlyLowElement = true; 4253 bool usesOnlyOneValue = true; 4254 bool hasDominantValue = false; 4255 bool isConstant = true; 4256 4257 // Map of the number of times a particular SDValue appears in the 4258 // element list. 
4259 DenseMap<SDValue, unsigned> ValueCounts; 4260 SDValue Value; 4261 for (unsigned i = 0; i < NumElts; ++i) { 4262 SDValue V = Op.getOperand(i); 4263 if (V.getOpcode() == ISD::UNDEF) 4264 continue; 4265 if (i > 0) 4266 isOnlyLowElement = false; 4267 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4268 isConstant = false; 4269 4270 ValueCounts.insert(std::make_pair(V, 0)); 4271 unsigned &Count = ValueCounts[V]; 4272 4273 // Is this value dominant? (takes up more than half of the lanes) 4274 if (++Count > (NumElts / 2)) { 4275 hasDominantValue = true; 4276 Value = V; 4277 } 4278 } 4279 if (ValueCounts.size() != 1) 4280 usesOnlyOneValue = false; 4281 if (!Value.getNode() && ValueCounts.size() > 0) 4282 Value = ValueCounts.begin()->first; 4283 4284 if (ValueCounts.size() == 0) 4285 return DAG.getUNDEF(VT); 4286 4287 if (isOnlyLowElement) 4288 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 4289 4290 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4291 4292 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 4293 // i32 and try again. 4294 if (hasDominantValue && EltSize <= 32) { 4295 if (!isConstant) { 4296 SDValue N; 4297 4298 // If we are VDUPing a value that comes directly from a vector, that will 4299 // cause an unnecessary move to and from a GPR, where instead we could 4300 // just use VDUPLANE. 4301 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 4302 // We need to create a new undef vector to use for the VDUPLANE if the 4303 // size of the vector from which we get the value is different than the 4304 // size of the vector that we need to create. We will insert the element 4305 // such that the register coalescer will remove unnecessary copies. 4306 if (VT != Value->getOperand(0).getValueType()) { 4307 ConstantSDNode *constIndex; 4308 constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)); 4309 assert(constIndex && "The index is not a constant!"); 4310 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 4311 VT.getVectorNumElements(); 4312 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4313 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 4314 Value, DAG.getConstant(index, MVT::i32)), 4315 DAG.getConstant(index, MVT::i32)); 4316 } else { 4317 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4318 Value->getOperand(0), Value->getOperand(1)); 4319 } 4320 } 4321 else 4322 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 4323 4324 if (!usesOnlyOneValue) { 4325 // The dominant value was splatted as 'N', but we now have to insert 4326 // all differing elements. 
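// E.g. an illustrative <a, a, b, a> build becomes
//   (insert_vector_elt (vdup a), b, 2).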
4327 for (unsigned I = 0; I < NumElts; ++I) { 4328 if (Op.getOperand(I) == Value) 4329 continue; 4330 SmallVector<SDValue, 3> Ops; 4331 Ops.push_back(N); 4332 Ops.push_back(Op.getOperand(I)); 4333 Ops.push_back(DAG.getConstant(I, MVT::i32)); 4334 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3); 4335 } 4336 } 4337 return N; 4338 } 4339 if (VT.getVectorElementType().isFloatingPoint()) { 4340 SmallVector<SDValue, 8> Ops; 4341 for (unsigned i = 0; i < NumElts; ++i) 4342 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4343 Op.getOperand(i))); 4344 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4345 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4346 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4347 if (Val.getNode()) 4348 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4349 } 4350 if (usesOnlyOneValue) { 4351 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4352 if (isConstant && Val.getNode()) 4353 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4354 } 4355 } 4356 4357 // If all elements are constants and the case above didn't get hit, fall back 4358 // to the default expansion, which will generate a load from the constant 4359 // pool. 4360 if (isConstant) 4361 return SDValue(); 4362 4363 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4364 if (NumElts >= 4) { 4365 SDValue shuffle = ReconstructShuffle(Op, DAG); 4366 if (shuffle != SDValue()) 4367 return shuffle; 4368 } 4369 4370 // Vectors with 32- or 64-bit elements can be built by directly assigning 4371 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4372 // will be legalized. 4373 if (EltSize >= 32) { 4374 // Do the expansion with floating-point types, since that is what the VFP 4375 // registers are defined to use, and since i64 is not legal. 4376 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4377 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4378 SmallVector<SDValue, 8> Ops; 4379 for (unsigned i = 0; i < NumElts; ++i) 4380 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4381 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4382 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4383 } 4384 4385 return SDValue(); 4386} 4387 4388// Gather data to see if the operation can be modelled as a 4389// shuffle in combination with VEXTs. 4390SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4391 SelectionDAG &DAG) const { 4392 DebugLoc dl = Op.getDebugLoc(); 4393 EVT VT = Op.getValueType(); 4394 unsigned NumElts = VT.getVectorNumElements(); 4395 4396 SmallVector<SDValue, 2> SourceVecs; 4397 SmallVector<unsigned, 2> MinElts; 4398 SmallVector<unsigned, 2> MaxElts; 4399 4400 for (unsigned i = 0; i < NumElts; ++i) { 4401 SDValue V = Op.getOperand(i); 4402 if (V.getOpcode() == ISD::UNDEF) 4403 continue; 4404 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4405 // A shuffle can only come from building a vector from various 4406 // elements of other vectors. 4407 return SDValue(); 4408 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4409 VT.getVectorElementType()) { 4410 // This code doesn't know how to handle shuffles where the vector 4411 // element types do not match (this happens because type legalization 4412 // promotes the return type of EXTRACT_VECTOR_ELT). 4413 // FIXME: It might be appropriate to extend this code to handle 4414 // mismatched types. 
4415 return SDValue(); 4416 } 4417 4418 // Record this extraction against the appropriate vector if possible... 4419 SDValue SourceVec = V.getOperand(0); 4420 // If the element number isn't a constant, we can't effectively 4421 // analyze what's going on. 4422 if (!isa<ConstantSDNode>(V.getOperand(1))) 4423 return SDValue(); 4424 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4425 bool FoundSource = false; 4426 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4427 if (SourceVecs[j] == SourceVec) { 4428 if (MinElts[j] > EltNo) 4429 MinElts[j] = EltNo; 4430 if (MaxElts[j] < EltNo) 4431 MaxElts[j] = EltNo; 4432 FoundSource = true; 4433 break; 4434 } 4435 } 4436 4437 // Or record a new source if not... 4438 if (!FoundSource) { 4439 SourceVecs.push_back(SourceVec); 4440 MinElts.push_back(EltNo); 4441 MaxElts.push_back(EltNo); 4442 } 4443 } 4444 4445 // Currently only do something sane when at most two source vectors 4446 // involved. 4447 if (SourceVecs.size() > 2) 4448 return SDValue(); 4449 4450 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4451 int VEXTOffsets[2] = {0, 0}; 4452 4453 // This loop extracts the usage patterns of the source vectors 4454 // and prepares appropriate SDValues for a shuffle if possible. 4455 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4456 if (SourceVecs[i].getValueType() == VT) { 4457 // No VEXT necessary 4458 ShuffleSrcs[i] = SourceVecs[i]; 4459 VEXTOffsets[i] = 0; 4460 continue; 4461 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4462 // It probably isn't worth padding out a smaller vector just to 4463 // break it down again in a shuffle. 4464 return SDValue(); 4465 } 4466 4467 // Since only 64-bit and 128-bit vectors are legal on ARM and 4468 // we've eliminated the other cases... 
4469 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4470 "unexpected vector sizes in ReconstructShuffle"); 4471 4472 if (MaxElts[i] - MinElts[i] >= NumElts) { 4473 // Span too large for a VEXT to cope 4474 return SDValue(); 4475 } 4476 4477 if (MinElts[i] >= NumElts) { 4478 // The extraction can just take the second half 4479 VEXTOffsets[i] = NumElts; 4480 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4481 SourceVecs[i], 4482 DAG.getIntPtrConstant(NumElts)); 4483 } else if (MaxElts[i] < NumElts) { 4484 // The extraction can just take the first half 4485 VEXTOffsets[i] = 0; 4486 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4487 SourceVecs[i], 4488 DAG.getIntPtrConstant(0)); 4489 } else { 4490 // An actual VEXT is needed 4491 VEXTOffsets[i] = MinElts[i]; 4492 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4493 SourceVecs[i], 4494 DAG.getIntPtrConstant(0)); 4495 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4496 SourceVecs[i], 4497 DAG.getIntPtrConstant(NumElts)); 4498 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4499 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4500 } 4501 } 4502 4503 SmallVector<int, 8> Mask; 4504 4505 for (unsigned i = 0; i < NumElts; ++i) { 4506 SDValue Entry = Op.getOperand(i); 4507 if (Entry.getOpcode() == ISD::UNDEF) { 4508 Mask.push_back(-1); 4509 continue; 4510 } 4511 4512 SDValue ExtractVec = Entry.getOperand(0); 4513 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4514 .getOperand(1))->getSExtValue(); 4515 if (ExtractVec == SourceVecs[0]) { 4516 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4517 } else { 4518 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4519 } 4520 } 4521 4522 // Final check before we try to produce nonsense... 4523 if (isShuffleMaskLegal(Mask, VT)) 4524 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4525 &Mask[0]); 4526 4527 return SDValue(); 4528} 4529 4530/// isShuffleMaskLegal - Targets can use this to indicate that they only 4531/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4532/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4533/// are assumed to be legal. 4534bool 4535ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4536 EVT VT) const { 4537 if (VT.getVectorNumElements() == 4 && 4538 (VT.is128BitVector() || VT.is64BitVector())) { 4539 unsigned PFIndexes[4]; 4540 for (unsigned i = 0; i != 4; ++i) { 4541 if (M[i] < 0) 4542 PFIndexes[i] = 8; 4543 else 4544 PFIndexes[i] = M[i]; 4545 } 4546 4547 // Compute the index in the perfect shuffle table. 
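// For example, the 4-lane mask <2,3,0,1> (with 8 standing in for an undef
// lane) encodes as 2*9*9*9 + 3*9*9 + 0*9 + 1 = 1702; the top two bits of the
// resulting table entry hold the cost that is checked below.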
4548 unsigned PFTableIndex = 4549 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4550 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4551 unsigned Cost = (PFEntry >> 30); 4552 4553 if (Cost <= 4) 4554 return true; 4555 } 4556 4557 bool ReverseVEXT; 4558 unsigned Imm, WhichResult; 4559 4560 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4561 return (EltSize >= 32 || 4562 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4563 isVREVMask(M, VT, 64) || 4564 isVREVMask(M, VT, 32) || 4565 isVREVMask(M, VT, 16) || 4566 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4567 isVTBLMask(M, VT) || 4568 isVTRNMask(M, VT, WhichResult) || 4569 isVUZPMask(M, VT, WhichResult) || 4570 isVZIPMask(M, VT, WhichResult) || 4571 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4572 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4573 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4574} 4575 4576/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4577/// the specified operations to build the shuffle. 4578static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4579 SDValue RHS, SelectionDAG &DAG, 4580 DebugLoc dl) { 4581 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4582 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4583 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4584 4585 enum { 4586 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4587 OP_VREV, 4588 OP_VDUP0, 4589 OP_VDUP1, 4590 OP_VDUP2, 4591 OP_VDUP3, 4592 OP_VEXT1, 4593 OP_VEXT2, 4594 OP_VEXT3, 4595 OP_VUZPL, // VUZP, left result 4596 OP_VUZPR, // VUZP, right result 4597 OP_VZIPL, // VZIP, left result 4598 OP_VZIPR, // VZIP, right result 4599 OP_VTRNL, // VTRN, left result 4600 OP_VTRNR // VTRN, right result 4601 }; 4602 4603 if (OpNum == OP_COPY) { 4604 if (LHSID == (1*9+2)*9+3) return LHS; 4605 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4606 return RHS; 4607 } 4608 4609 SDValue OpLHS, OpRHS; 4610 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4611 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4612 EVT VT = OpLHS.getValueType(); 4613 4614 switch (OpNum) { 4615 default: llvm_unreachable("Unknown shuffle opcode!"); 4616 case OP_VREV: 4617 // VREV divides the vector in half and swaps within the half. 
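// In all three cases below the net effect on the four lanes is to swap
// adjacent pairs, i.e. <a,b,c,d> becomes <b,a,d,c>; the element size merely
// selects between VREV64, VREV32 and VREV16.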
4618 if (VT.getVectorElementType() == MVT::i32 || 4619 VT.getVectorElementType() == MVT::f32) 4620 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4621 // vrev <4 x i16> -> VREV32 4622 if (VT.getVectorElementType() == MVT::i16) 4623 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4624 // vrev <4 x i8> -> VREV16 4625 assert(VT.getVectorElementType() == MVT::i8); 4626 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4627 case OP_VDUP0: 4628 case OP_VDUP1: 4629 case OP_VDUP2: 4630 case OP_VDUP3: 4631 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4632 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4633 case OP_VEXT1: 4634 case OP_VEXT2: 4635 case OP_VEXT3: 4636 return DAG.getNode(ARMISD::VEXT, dl, VT, 4637 OpLHS, OpRHS, 4638 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4639 case OP_VUZPL: 4640 case OP_VUZPR: 4641 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4642 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4643 case OP_VZIPL: 4644 case OP_VZIPR: 4645 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4646 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4647 case OP_VTRNL: 4648 case OP_VTRNR: 4649 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4650 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4651 } 4652} 4653 4654static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4655 ArrayRef<int> ShuffleMask, 4656 SelectionDAG &DAG) { 4657 // Check to see if we can use the VTBL instruction. 4658 SDValue V1 = Op.getOperand(0); 4659 SDValue V2 = Op.getOperand(1); 4660 DebugLoc DL = Op.getDebugLoc(); 4661 4662 SmallVector<SDValue, 8> VTBLMask; 4663 for (ArrayRef<int>::iterator 4664 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4665 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4666 4667 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4668 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4669 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4670 &VTBLMask[0], 8)); 4671 4672 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4673 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4674 &VTBLMask[0], 8)); 4675} 4676 4677static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4678 SDValue V1 = Op.getOperand(0); 4679 SDValue V2 = Op.getOperand(1); 4680 DebugLoc dl = Op.getDebugLoc(); 4681 EVT VT = Op.getValueType(); 4682 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4683 4684 // Convert shuffles that are directly supported on NEON to target-specific 4685 // DAG nodes, instead of keeping them as shuffles and matching them again 4686 // during code selection. This is more efficient and avoids the possibility 4687 // of inconsistencies between legalization and selection. 4688 // FIXME: floating-point vectors should be canonicalized to integer vectors 4689 // of the same time so that they get CSEd properly. 4690 ArrayRef<int> ShuffleMask = SVN->getMask(); 4691 4692 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4693 if (EltSize <= 32) { 4694 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4695 int Lane = SVN->getSplatIndex(); 4696 // If this is undef splat, generate it via "just" vdup, if possible. 4697 if (Lane == -1) Lane = 0; 4698 4699 // Test if V1 is a SCALAR_TO_VECTOR. 4700 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4701 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4702 } 4703 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 4704 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 4705 // reaches it). 
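// For example, (build_vector %x, undef, undef, undef) is equivalent to
// (scalar_to_vector %x), so a lane-0 splat of it can be emitted as VDUP %x.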
4706 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 4707 !isa<ConstantSDNode>(V1.getOperand(0))) { 4708 bool IsScalarToVector = true; 4709 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 4710 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 4711 IsScalarToVector = false; 4712 break; 4713 } 4714 if (IsScalarToVector) 4715 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4716 } 4717 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4718 DAG.getConstant(Lane, MVT::i32)); 4719 } 4720 4721 bool ReverseVEXT; 4722 unsigned Imm; 4723 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4724 if (ReverseVEXT) 4725 std::swap(V1, V2); 4726 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4727 DAG.getConstant(Imm, MVT::i32)); 4728 } 4729 4730 if (isVREVMask(ShuffleMask, VT, 64)) 4731 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4732 if (isVREVMask(ShuffleMask, VT, 32)) 4733 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4734 if (isVREVMask(ShuffleMask, VT, 16)) 4735 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4736 4737 if (V2->getOpcode() == ISD::UNDEF && 4738 isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 4739 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 4740 DAG.getConstant(Imm, MVT::i32)); 4741 } 4742 4743 // Check for Neon shuffles that modify both input vectors in place. 4744 // If both results are used, i.e., if there are two shuffles with the same 4745 // source operands and with masks corresponding to both results of one of 4746 // these operations, DAG memoization will ensure that a single node is 4747 // used for both shuffles. 4748 unsigned WhichResult; 4749 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4750 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4751 V1, V2).getValue(WhichResult); 4752 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4753 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4754 V1, V2).getValue(WhichResult); 4755 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4756 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4757 V1, V2).getValue(WhichResult); 4758 4759 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4760 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4761 V1, V1).getValue(WhichResult); 4762 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4763 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4764 V1, V1).getValue(WhichResult); 4765 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4766 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4767 V1, V1).getValue(WhichResult); 4768 } 4769 4770 // If the shuffle is not directly supported and it has 4 elements, use 4771 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4772 unsigned NumElts = VT.getVectorNumElements(); 4773 if (NumElts == 4) { 4774 unsigned PFIndexes[4]; 4775 for (unsigned i = 0; i != 4; ++i) { 4776 if (ShuffleMask[i] < 0) 4777 PFIndexes[i] = 8; 4778 else 4779 PFIndexes[i] = ShuffleMask[i]; 4780 } 4781 4782 // Compute the index in the perfect shuffle table. 4783 unsigned PFTableIndex = 4784 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4785 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4786 unsigned Cost = (PFEntry >> 30); 4787 4788 if (Cost <= 4) 4789 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4790 } 4791 4792 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 
4793 if (EltSize >= 32) { 4794 // Do the expansion with floating-point types, since that is what the VFP 4795 // registers are defined to use, and since i64 is not legal. 4796 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4797 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4798 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4799 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4800 SmallVector<SDValue, 8> Ops; 4801 for (unsigned i = 0; i < NumElts; ++i) { 4802 if (ShuffleMask[i] < 0) 4803 Ops.push_back(DAG.getUNDEF(EltVT)); 4804 else 4805 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4806 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4807 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4808 MVT::i32))); 4809 } 4810 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4811 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4812 } 4813 4814 if (VT == MVT::v8i8) { 4815 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4816 if (NewOp.getNode()) 4817 return NewOp; 4818 } 4819 4820 return SDValue(); 4821} 4822 4823static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4824 // INSERT_VECTOR_ELT is legal only for immediate indexes. 4825 SDValue Lane = Op.getOperand(2); 4826 if (!isa<ConstantSDNode>(Lane)) 4827 return SDValue(); 4828 4829 return Op; 4830} 4831 4832static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4833 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4834 SDValue Lane = Op.getOperand(1); 4835 if (!isa<ConstantSDNode>(Lane)) 4836 return SDValue(); 4837 4838 SDValue Vec = Op.getOperand(0); 4839 if (Op.getValueType() == MVT::i32 && 4840 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4841 DebugLoc dl = Op.getDebugLoc(); 4842 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4843 } 4844 4845 return Op; 4846} 4847 4848static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4849 // The only time a CONCAT_VECTORS operation can have legal types is when 4850 // two 64-bit vectors are concatenated to a 128-bit vector. 4851 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4852 "unexpected CONCAT_VECTORS"); 4853 DebugLoc dl = Op.getDebugLoc(); 4854 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4855 SDValue Op0 = Op.getOperand(0); 4856 SDValue Op1 = Op.getOperand(1); 4857 if (Op0.getOpcode() != ISD::UNDEF) 4858 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4859 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4860 DAG.getIntPtrConstant(0)); 4861 if (Op1.getOpcode() != ISD::UNDEF) 4862 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4863 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4864 DAG.getIntPtrConstant(1)); 4865 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4866} 4867 4868/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4869/// element has been zero/sign-extended, depending on the isSigned parameter, 4870/// from an integer type half its size. 4871static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4872 bool isSigned) { 4873 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4874 EVT VT = N->getValueType(0); 4875 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4876 SDNode *BVN = N->getOperand(0).getNode(); 4877 if (BVN->getValueType(0) != MVT::v4i32 || 4878 BVN->getOpcode() != ISD::BUILD_VECTOR) 4879 return false; 4880 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4881 unsigned HiElt = 1 - LoElt; 4882 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4883 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4884 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4885 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4886 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4887 return false; 4888 if (isSigned) { 4889 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4890 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4891 return true; 4892 } else { 4893 if (Hi0->isNullValue() && Hi1->isNullValue()) 4894 return true; 4895 } 4896 return false; 4897 } 4898 4899 if (N->getOpcode() != ISD::BUILD_VECTOR) 4900 return false; 4901 4902 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4903 SDNode *Elt = N->getOperand(i).getNode(); 4904 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4905 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4906 unsigned HalfSize = EltSize / 2; 4907 if (isSigned) { 4908 if (!isIntN(HalfSize, C->getSExtValue())) 4909 return false; 4910 } else { 4911 if (!isUIntN(HalfSize, C->getZExtValue())) 4912 return false; 4913 } 4914 continue; 4915 } 4916 return false; 4917 } 4918 4919 return true; 4920} 4921 4922/// isSignExtended - Check if a node is a vector value that is sign-extended 4923/// or a constant BUILD_VECTOR with sign-extended elements. 4924static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4925 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4926 return true; 4927 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4928 return true; 4929 return false; 4930} 4931 4932/// isZeroExtended - Check if a node is a vector value that is zero-extended 4933/// or a constant BUILD_VECTOR with zero-extended elements. 4934static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4935 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4936 return true; 4937 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4938 return true; 4939 return false; 4940} 4941 4942/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4943/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4944static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4945 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4946 return N->getOperand(0); 4947 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4948 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4949 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4950 LD->isNonTemporal(), LD->isInvariant(), 4951 LD->getAlignment()); 4952 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4953 // have been legalized as a BITCAST from v4i32. 4954 if (N->getOpcode() == ISD::BITCAST) { 4955 SDNode *BVN = N->getOperand(0).getNode(); 4956 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4957 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4958 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4959 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4960 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4961 } 4962 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
4963 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4964 EVT VT = N->getValueType(0); 4965 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4966 unsigned NumElts = VT.getVectorNumElements(); 4967 MVT TruncVT = MVT::getIntegerVT(EltSize); 4968 SmallVector<SDValue, 8> Ops; 4969 for (unsigned i = 0; i != NumElts; ++i) { 4970 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4971 const APInt &CInt = C->getAPIntValue(); 4972 // Element types smaller than 32 bits are not legal, so use i32 elements. 4973 // The values are implicitly truncated so sext vs. zext doesn't matter. 4974 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32)); 4975 } 4976 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4977 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4978} 4979 4980static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4981 unsigned Opcode = N->getOpcode(); 4982 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4983 SDNode *N0 = N->getOperand(0).getNode(); 4984 SDNode *N1 = N->getOperand(1).getNode(); 4985 return N0->hasOneUse() && N1->hasOneUse() && 4986 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4987 } 4988 return false; 4989} 4990 4991static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4992 unsigned Opcode = N->getOpcode(); 4993 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4994 SDNode *N0 = N->getOperand(0).getNode(); 4995 SDNode *N1 = N->getOperand(1).getNode(); 4996 return N0->hasOneUse() && N1->hasOneUse() && 4997 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4998 } 4999 return false; 5000} 5001 5002static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 5003 // Multiplications are only custom-lowered for 128-bit vectors so that 5004 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 5005 EVT VT = Op.getValueType(); 5006 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 5007 SDNode *N0 = Op.getOperand(0).getNode(); 5008 SDNode *N1 = Op.getOperand(1).getNode(); 5009 unsigned NewOpc = 0; 5010 bool isMLA = false; 5011 bool isN0SExt = isSignExtended(N0, DAG); 5012 bool isN1SExt = isSignExtended(N1, DAG); 5013 if (isN0SExt && isN1SExt) 5014 NewOpc = ARMISD::VMULLs; 5015 else { 5016 bool isN0ZExt = isZeroExtended(N0, DAG); 5017 bool isN1ZExt = isZeroExtended(N1, DAG); 5018 if (isN0ZExt && isN1ZExt) 5019 NewOpc = ARMISD::VMULLu; 5020 else if (isN1SExt || isN1ZExt) { 5021 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 5022 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 5023 if (isN1SExt && isAddSubSExt(N0, DAG)) { 5024 NewOpc = ARMISD::VMULLs; 5025 isMLA = true; 5026 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 5027 NewOpc = ARMISD::VMULLu; 5028 isMLA = true; 5029 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 5030 std::swap(N0, N1); 5031 NewOpc = ARMISD::VMULLu; 5032 isMLA = true; 5033 } 5034 } 5035 5036 if (!NewOpc) { 5037 if (VT == MVT::v2i64) 5038 // Fall through to expand this. It is not legal. 5039 return SDValue(); 5040 else 5041 // Other vector multiplications are legal. 5042 return Op; 5043 } 5044 } 5045 5046 // Legalize to a VMULL instruction. 
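// For example, (mul (sext v4i16 %a to v4i32), (sext v4i16 %b to v4i32))
// becomes a single VMULLs of the original v4i16 operands, yielding the
// full-width v4i32 product.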
5047 DebugLoc DL = Op.getDebugLoc(); 5048 SDValue Op0; 5049 SDValue Op1 = SkipExtension(N1, DAG); 5050 if (!isMLA) { 5051 Op0 = SkipExtension(N0, DAG); 5052 assert(Op0.getValueType().is64BitVector() && 5053 Op1.getValueType().is64BitVector() && 5054 "unexpected types for extended operands to VMULL"); 5055 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 5056 } 5057 5058 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 5059 // isel lowering to take advantage of no-stall back to back vmul + vmla. 5060 // vmull q0, d4, d6 5061 // vmlal q0, d5, d6 5062 // is faster than 5063 // vaddl q0, d4, d5 5064 // vmovl q1, d6 5065 // vmul q0, q0, q1 5066 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 5067 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 5068 EVT Op1VT = Op1.getValueType(); 5069 return DAG.getNode(N0->getOpcode(), DL, VT, 5070 DAG.getNode(NewOpc, DL, VT, 5071 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 5072 DAG.getNode(NewOpc, DL, VT, 5073 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 5074} 5075 5076static SDValue 5077LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 5078 // Convert to float 5079 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 5080 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 5081 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 5082 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 5083 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 5084 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 5085 // Get reciprocal estimate. 5086 // float4 recip = vrecpeq_f32(yf); 5087 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5088 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 5089 // Because char has a smaller range than uchar, we can actually get away 5090 // without any newton steps. This requires that we use a weird bias 5091 // of 0xb000, however (again, this has been exhaustively tested). 5092 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 5093 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 5094 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 5095 Y = DAG.getConstant(0xb000, MVT::i32); 5096 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 5097 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 5098 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 5099 // Convert back to short. 5100 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 5101 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 5102 return X; 5103} 5104 5105static SDValue 5106LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 5107 SDValue N2; 5108 // Convert to float. 5109 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 5110 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 5111 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 5112 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 5113 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 5114 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 5115 5116 // Use reciprocal estimate and one refinement step. 
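// vrecps(a, b) computes 2.0f - a*b, so the multiply by vrecpsq_f32(yf, recip)
// below is one Newton-Raphson step x1 = x0 * (2 - yf * x0) towards 1/yf.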
5117 // float4 recip = vrecpeq_f32(yf); 5118 // recip *= vrecpsq_f32(yf, recip); 5119 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5120 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 5121 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5122 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5123 N1, N2); 5124 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5125 // Because short has a smaller range than ushort, we can actually get away 5126 // with only a single newton step. This requires that we use a weird bias 5127 // of 89, however (again, this has been exhaustively tested). 5128 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 5129 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 5130 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 5131 N1 = DAG.getConstant(0x89, MVT::i32); 5132 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 5133 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 5134 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 5135 // Convert back to integer and return. 5136 // return vmovn_s32(vcvt_s32_f32(result)); 5137 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 5138 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 5139 return N0; 5140} 5141 5142static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 5143 EVT VT = Op.getValueType(); 5144 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5145 "unexpected type for custom-lowering ISD::SDIV"); 5146 5147 DebugLoc dl = Op.getDebugLoc(); 5148 SDValue N0 = Op.getOperand(0); 5149 SDValue N1 = Op.getOperand(1); 5150 SDValue N2, N3; 5151 5152 if (VT == MVT::v8i8) { 5153 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 5154 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 5155 5156 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5157 DAG.getIntPtrConstant(4)); 5158 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5159 DAG.getIntPtrConstant(4)); 5160 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5161 DAG.getIntPtrConstant(0)); 5162 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5163 DAG.getIntPtrConstant(0)); 5164 5165 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 5166 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 5167 5168 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 5169 N0 = LowerCONCAT_VECTORS(N0, DAG); 5170 5171 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 5172 return N0; 5173 } 5174 return LowerSDIV_v4i16(N0, N1, dl, DAG); 5175} 5176 5177static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 5178 EVT VT = Op.getValueType(); 5179 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5180 "unexpected type for custom-lowering ISD::UDIV"); 5181 5182 DebugLoc dl = Op.getDebugLoc(); 5183 SDValue N0 = Op.getOperand(0); 5184 SDValue N1 = Op.getOperand(1); 5185 SDValue N2, N3; 5186 5187 if (VT == MVT::v8i8) { 5188 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 5189 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 5190 5191 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5192 DAG.getIntPtrConstant(4)); 5193 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5194 DAG.getIntPtrConstant(4)); 5195 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5196 DAG.getIntPtrConstant(0)); 5197 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5198 DAG.getIntPtrConstant(0)); 5199 5200 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 5201 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 5202 5203 N0 = 
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 5204 N0 = LowerCONCAT_VECTORS(N0, DAG); 5205 5206 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 5207 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 5208 N0); 5209 return N0; 5210 } 5211 5212 // v4i16 udiv ... Convert to float. 5213 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 5214 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 5215 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 5216 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 5217 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 5218 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 5219 5220 // Use reciprocal estimate and two refinement steps. 5221 // float4 recip = vrecpeq_f32(yf); 5222 // recip *= vrecpsq_f32(yf, recip); 5223 // recip *= vrecpsq_f32(yf, recip); 5224 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5225 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1); 5226 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5227 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5228 BN1, N2); 5229 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5230 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5231 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5232 BN1, N2); 5233 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5234 // Simply multiplying by the reciprocal estimate can leave us a few ulps 5235 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 5236 // and that it will never cause us to return an answer too large). 5237 // float4 result = as_float4(as_int4(xf*recip) + 2); 5238 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 5239 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 5240 N1 = DAG.getConstant(2, MVT::i32); 5241 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 5242 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 5243 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 5244 // Convert back to integer and return. 5245 // return vmovn_u32(vcvt_s32_f32(result)); 5246 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 5247 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 5248 return N0; 5249} 5250 5251 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 5252 EVT VT = Op.getNode()->getValueType(0); 5253 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 5254 5255 unsigned Opc; 5256 bool ExtraOp = false; 5257 switch (Op.getOpcode()) { 5258 default: llvm_unreachable("Invalid code"); 5259 case ISD::ADDC: Opc = ARMISD::ADDC; break; 5260 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 5261 case ISD::SUBC: Opc = ARMISD::SUBC; break; 5262 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 5263 } 5264 5265 if (!ExtraOp) 5266 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 5267 Op.getOperand(1)); 5268 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 5269 Op.getOperand(1), Op.getOperand(2)); 5270} 5271 5272 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 5273 // Monotonic load/store is legal for all targets 5274 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 5275 return Op; 5276 5277 // Acquire/Release load/store is not legal for targets without a 5278 // dmb or equivalent available.
5279 return SDValue(); 5280 } 5281 5282 5283 static void 5284 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, 5285 SelectionDAG &DAG, unsigned NewOp) { 5286 DebugLoc dl = Node->getDebugLoc(); 5287 assert (Node->getValueType(0) == MVT::i64 && 5288 "Only know how to expand i64 atomics"); 5289 5290 SmallVector<SDValue, 6> Ops; 5291 Ops.push_back(Node->getOperand(0)); // Chain 5292 Ops.push_back(Node->getOperand(1)); // Ptr 5293 // Low part of Val1 5294 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5295 Node->getOperand(2), DAG.getIntPtrConstant(0))); 5296 // High part of Val1 5297 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5298 Node->getOperand(2), DAG.getIntPtrConstant(1))); 5299 if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) { 5300 // Low part of Val2 5301 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5302 Node->getOperand(3), DAG.getIntPtrConstant(0))); 5303 // High part of Val2 5304 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5305 Node->getOperand(3), DAG.getIntPtrConstant(1))); 5306 } 5307 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 5308 SDValue Result = 5309 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64, 5310 cast<MemSDNode>(Node)->getMemOperand()); 5311 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) }; 5312 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 5313 Results.push_back(Result.getValue(2)); 5314} 5315 5316 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5317 switch (Op.getOpcode()) { 5318 default: llvm_unreachable("Don't know how to custom lower this!"); 5319 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5320 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5321 case ISD::GlobalAddress: 5322 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 5323 LowerGlobalAddressELF(Op, DAG); 5324 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5325 case ISD::SELECT: return LowerSELECT(Op, DAG); 5326 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5327 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 5328 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 5329 case ISD::VASTART: return LowerVASTART(Op, DAG); 5330 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 5331 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 5332 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 5333 case ISD::SINT_TO_FP: 5334 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5335 case ISD::FP_TO_SINT: 5336 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 5337 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5338 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5339 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5340 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 5341 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 5342 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 5343 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 5344 Subtarget); 5345 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 5346 case ISD::SHL: 5347 case ISD::SRL: 5348 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 5349 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 5350 case ISD::SRL_PARTS: 5351 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 5352 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 5353 case ISD::SETCC: return LowerVSETCC(Op, DAG); 5354 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 5355 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 5356 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5357 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5358 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5359 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 5360 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5361 case ISD::MUL: return LowerMUL(Op, DAG); 5362 case ISD::SDIV: return LowerSDIV(Op, DAG); 5363 case ISD::UDIV: return LowerUDIV(Op, DAG); 5364 case ISD::ADDC: 5365 case ISD::ADDE: 5366 case ISD::SUBC: 5367 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 5368 case ISD::ATOMIC_LOAD: 5369 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5370 } 5371} 5372 5373/// ReplaceNodeResults - Replace the results of node with an illegal result 5374/// type with new values built out of custom code. 
5375void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5376 SmallVectorImpl<SDValue>&Results, 5377 SelectionDAG &DAG) const { 5378 SDValue Res; 5379 switch (N->getOpcode()) { 5380 default: 5381 llvm_unreachable("Don't know how to custom expand this!"); 5382 case ISD::BITCAST: 5383 Res = ExpandBITCAST(N, DAG); 5384 break; 5385 case ISD::SRL: 5386 case ISD::SRA: 5387 Res = Expand64BitShift(N, DAG, Subtarget); 5388 break; 5389 case ISD::ATOMIC_LOAD_ADD: 5390 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5391 return; 5392 case ISD::ATOMIC_LOAD_AND: 5393 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5394 return; 5395 case ISD::ATOMIC_LOAD_NAND: 5396 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5397 return; 5398 case ISD::ATOMIC_LOAD_OR: 5399 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5400 return; 5401 case ISD::ATOMIC_LOAD_SUB: 5402 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5403 return; 5404 case ISD::ATOMIC_LOAD_XOR: 5405 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5406 return; 5407 case ISD::ATOMIC_SWAP: 5408 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5409 return; 5410 case ISD::ATOMIC_CMP_SWAP: 5411 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5412 return; 5413 case ISD::ATOMIC_LOAD_MIN: 5414 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMIN64_DAG); 5415 return; 5416 case ISD::ATOMIC_LOAD_UMIN: 5417 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMIN64_DAG); 5418 return; 5419 case ISD::ATOMIC_LOAD_MAX: 5420 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMAX64_DAG); 5421 return; 5422 case ISD::ATOMIC_LOAD_UMAX: 5423 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMAX64_DAG); 5424 return; 5425 } 5426 if (Res.getNode()) 5427 Results.push_back(Res); 5428} 5429 5430//===----------------------------------------------------------------------===// 5431// ARM Scheduler Hooks 5432//===----------------------------------------------------------------------===// 5433 5434MachineBasicBlock * 5435ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5436 MachineBasicBlock *BB, 5437 unsigned Size) const { 5438 unsigned dest = MI->getOperand(0).getReg(); 5439 unsigned ptr = MI->getOperand(1).getReg(); 5440 unsigned oldval = MI->getOperand(2).getReg(); 5441 unsigned newval = MI->getOperand(3).getReg(); 5442 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5443 DebugLoc dl = MI->getDebugLoc(); 5444 bool isThumb2 = Subtarget->isThumb2(); 5445 5446 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5447 unsigned scratch = MRI.createVirtualRegister(isThumb2 ? 5448 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5449 (const TargetRegisterClass*)&ARM::GPRRegClass); 5450 5451 if (isThumb2) { 5452 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5453 MRI.constrainRegClass(oldval, &ARM::rGPRRegClass); 5454 MRI.constrainRegClass(newval, &ARM::rGPRRegClass); 5455 } 5456 5457 unsigned ldrOpc, strOpc; 5458 switch (Size) { 5459 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5460 case 1: 5461 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5462 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5463 break; 5464 case 2: 5465 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5466 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5467 break; 5468 case 4: 5469 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5470 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5471 break; 5472 } 5473 5474 MachineFunction *MF = BB->getParent(); 5475 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5476 MachineFunction::iterator It = BB; 5477 ++It; // insert the new blocks after the current block 5478 5479 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5480 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5481 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5482 MF->insert(It, loop1MBB); 5483 MF->insert(It, loop2MBB); 5484 MF->insert(It, exitMBB); 5485 5486 // Transfer the remainder of BB and its successor edges to exitMBB. 5487 exitMBB->splice(exitMBB->begin(), BB, 5488 llvm::next(MachineBasicBlock::iterator(MI)), 5489 BB->end()); 5490 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5491 5492 // thisMBB: 5493 // ... 5494 // fallthrough --> loop1MBB 5495 BB->addSuccessor(loop1MBB); 5496 5497 // loop1MBB: 5498 // ldrex dest, [ptr] 5499 // cmp dest, oldval 5500 // bne exitMBB 5501 BB = loop1MBB; 5502 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5503 if (ldrOpc == ARM::t2LDREX) 5504 MIB.addImm(0); 5505 AddDefaultPred(MIB); 5506 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5507 .addReg(dest).addReg(oldval)); 5508 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5509 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5510 BB->addSuccessor(loop2MBB); 5511 BB->addSuccessor(exitMBB); 5512 5513 // loop2MBB: 5514 // strex scratch, newval, [ptr] 5515 // cmp scratch, #0 5516 // bne loop1MBB 5517 BB = loop2MBB; 5518 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5519 if (strOpc == ARM::t2STREX) 5520 MIB.addImm(0); 5521 AddDefaultPred(MIB); 5522 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5523 .addReg(scratch).addImm(0)); 5524 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5525 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5526 BB->addSuccessor(loop1MBB); 5527 BB->addSuccessor(exitMBB); 5528 5529 // exitMBB: 5530 // ... 5531 BB = exitMBB; 5532 5533 MI->eraseFromParent(); // The instruction is gone now. 5534 5535 return BB; 5536} 5537 5538MachineBasicBlock * 5539ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5540 unsigned Size, unsigned BinOpcode) const { 5541 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5542 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5543 5544 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5545 MachineFunction *MF = BB->getParent(); 5546 MachineFunction::iterator It = BB; 5547 ++It; 5548 5549 unsigned dest = MI->getOperand(0).getReg(); 5550 unsigned ptr = MI->getOperand(1).getReg(); 5551 unsigned incr = MI->getOperand(2).getReg(); 5552 DebugLoc dl = MI->getDebugLoc(); 5553 bool isThumb2 = Subtarget->isThumb2(); 5554 5555 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5556 if (isThumb2) { 5557 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5558 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5559 } 5560 5561 unsigned ldrOpc, strOpc; 5562 switch (Size) { 5563 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5564 case 1: 5565 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5566 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5567 break; 5568 case 2: 5569 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5570 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5571 break; 5572 case 4: 5573 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5574 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5575 break; 5576 } 5577 5578 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5579 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5580 MF->insert(It, loopMBB); 5581 MF->insert(It, exitMBB); 5582 5583 // Transfer the remainder of BB and its successor edges to exitMBB. 5584 exitMBB->splice(exitMBB->begin(), BB, 5585 llvm::next(MachineBasicBlock::iterator(MI)), 5586 BB->end()); 5587 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5588 5589 const TargetRegisterClass *TRC = isThumb2 ? 5590 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5591 (const TargetRegisterClass*)&ARM::GPRRegClass; 5592 unsigned scratch = MRI.createVirtualRegister(TRC); 5593 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5594 5595 // thisMBB: 5596 // ... 5597 // fallthrough --> loopMBB 5598 BB->addSuccessor(loopMBB); 5599 5600 // loopMBB: 5601 // ldrex dest, ptr 5602 // <binop> scratch2, dest, incr 5603 // strex scratch, scratch2, ptr 5604 // cmp scratch, #0 5605 // bne- loopMBB 5606 // fallthrough --> exitMBB 5607 BB = loopMBB; 5608 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5609 if (ldrOpc == ARM::t2LDREX) 5610 MIB.addImm(0); 5611 AddDefaultPred(MIB); 5612 if (BinOpcode) { 5613 // operand order needs to go the other way for NAND 5614 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5615 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5616 addReg(incr).addReg(dest)).addReg(0); 5617 else 5618 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5619 addReg(dest).addReg(incr)).addReg(0); 5620 } 5621 5622 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5623 if (strOpc == ARM::t2STREX) 5624 MIB.addImm(0); 5625 AddDefaultPred(MIB); 5626 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5627 .addReg(scratch).addImm(0)); 5628 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5629 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5630 5631 BB->addSuccessor(loopMBB); 5632 BB->addSuccessor(exitMBB); 5633 5634 // exitMBB: 5635 // ... 5636 BB = exitMBB; 5637 5638 MI->eraseFromParent(); // The instruction is gone now. 5639 5640 return BB; 5641} 5642 5643MachineBasicBlock * 5644ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5645 MachineBasicBlock *BB, 5646 unsigned Size, 5647 bool signExtend, 5648 ARMCC::CondCodes Cond) const { 5649 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5650 5651 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5652 MachineFunction *MF = BB->getParent(); 5653 MachineFunction::iterator It = BB; 5654 ++It; 5655 5656 unsigned dest = MI->getOperand(0).getReg(); 5657 unsigned ptr = MI->getOperand(1).getReg(); 5658 unsigned incr = MI->getOperand(2).getReg(); 5659 unsigned oldval = dest; 5660 DebugLoc dl = MI->getDebugLoc(); 5661 bool isThumb2 = Subtarget->isThumb2(); 5662 5663 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5664 if (isThumb2) { 5665 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5666 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5667 } 5668 5669 unsigned ldrOpc, strOpc, extendOpc; 5670 switch (Size) { 5671 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5672 case 1: 5673 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5674 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5675 extendOpc = isThumb2 ? 
ARM::t2SXTB : ARM::SXTB; 5676 break; 5677 case 2: 5678 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5679 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5680 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5681 break; 5682 case 4: 5683 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5684 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5685 extendOpc = 0; 5686 break; 5687 } 5688 5689 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5690 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5691 MF->insert(It, loopMBB); 5692 MF->insert(It, exitMBB); 5693 5694 // Transfer the remainder of BB and its successor edges to exitMBB. 5695 exitMBB->splice(exitMBB->begin(), BB, 5696 llvm::next(MachineBasicBlock::iterator(MI)), 5697 BB->end()); 5698 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5699 5700 const TargetRegisterClass *TRC = isThumb2 ? 5701 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5702 (const TargetRegisterClass*)&ARM::GPRRegClass; 5703 unsigned scratch = MRI.createVirtualRegister(TRC); 5704 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5705 5706 // thisMBB: 5707 // ... 5708 // fallthrough --> loopMBB 5709 BB->addSuccessor(loopMBB); 5710 5711 // loopMBB: 5712 // ldrex dest, ptr 5713 // (sign extend dest, if required) 5714 // cmp dest, incr 5715 // cmov.cond scratch2, incr, dest 5716 // strex scratch, scratch2, ptr 5717 // cmp scratch, #0 5718 // bne- loopMBB 5719 // fallthrough --> exitMBB 5720 BB = loopMBB; 5721 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5722 if (ldrOpc == ARM::t2LDREX) 5723 MIB.addImm(0); 5724 AddDefaultPred(MIB); 5725 5726 // Sign extend the value, if necessary. 5727 if (signExtend && extendOpc) { 5728 oldval = MRI.createVirtualRegister(&ARM::GPRRegClass); 5729 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5730 .addReg(dest) 5731 .addImm(0)); 5732 } 5733 5734 // Build compare and cmov instructions. 5735 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5736 .addReg(oldval).addReg(incr)); 5737 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5738 .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR); 5739 5740 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5741 if (strOpc == ARM::t2STREX) 5742 MIB.addImm(0); 5743 AddDefaultPred(MIB); 5744 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5745 .addReg(scratch).addImm(0)); 5746 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5747 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5748 5749 BB->addSuccessor(loopMBB); 5750 BB->addSuccessor(exitMBB); 5751 5752 // exitMBB: 5753 // ... 5754 BB = exitMBB; 5755 5756 MI->eraseFromParent(); // The instruction is gone now. 5757 5758 return BB; 5759} 5760 5761MachineBasicBlock * 5762ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5763 unsigned Op1, unsigned Op2, 5764 bool NeedsCarry, bool IsCmpxchg, 5765 bool IsMinMax, ARMCC::CondCodes CC) const { 5766 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
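// When Op1 is 0 no arithmetic is emitted below: the loop just reloads the old
// value with ldrexd and stores {vallo, valhi} back with strexd, i.e. a swap.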
5767 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5768 5769 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5770 MachineFunction *MF = BB->getParent(); 5771 MachineFunction::iterator It = BB; 5772 ++It; 5773 5774 unsigned destlo = MI->getOperand(0).getReg(); 5775 unsigned desthi = MI->getOperand(1).getReg(); 5776 unsigned ptr = MI->getOperand(2).getReg(); 5777 unsigned vallo = MI->getOperand(3).getReg(); 5778 unsigned valhi = MI->getOperand(4).getReg(); 5779 DebugLoc dl = MI->getDebugLoc(); 5780 bool isThumb2 = Subtarget->isThumb2(); 5781 5782 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5783 if (isThumb2) { 5784 MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); 5785 MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); 5786 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5787 } 5788 5789 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5790 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5791 5792 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5793 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5794 if (IsCmpxchg || IsMinMax) 5795 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5796 if (IsCmpxchg) 5797 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5798 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5799 5800 MF->insert(It, loopMBB); 5801 if (IsCmpxchg || IsMinMax) MF->insert(It, contBB); 5802 if (IsCmpxchg) MF->insert(It, cont2BB); 5803 MF->insert(It, exitMBB); 5804 5805 // Transfer the remainder of BB and its successor edges to exitMBB. 5806 exitMBB->splice(exitMBB->begin(), BB, 5807 llvm::next(MachineBasicBlock::iterator(MI)), 5808 BB->end()); 5809 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5810 5811 const TargetRegisterClass *TRC = isThumb2 ? 5812 (const TargetRegisterClass*)&ARM::tGPRRegClass : 5813 (const TargetRegisterClass*)&ARM::GPRRegClass; 5814 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5815 5816 // thisMBB: 5817 // ... 5818 // fallthrough --> loopMBB 5819 BB->addSuccessor(loopMBB); 5820 5821 // loopMBB: 5822 // ldrexd r2, r3, ptr 5823 // <binopa> r0, r2, incr 5824 // <binopb> r1, r3, incr 5825 // strexd storesuccess, r0, r1, ptr 5826 // cmp storesuccess, #0 5827 // bne- loopMBB 5828 // fallthrough --> exitMBB 5829 // 5830 // Note that the registers are explicitly specified because there is not any 5831 // way to force the register allocator to allocate a register pair. 5832 // 5833 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5834 // need to properly enforce the restriction that the two output registers 5835 // for ldrexd must be different. 5836 BB = loopMBB; 5837 // Load 5838 unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5839 unsigned GPRPair1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5840 unsigned GPRPair2; 5841 if (IsMinMax) { 5842 //We need an extra double register for doing min/max. 
5843 unsigned undef = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5844 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5845 GPRPair2 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5846 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), undef); 5847 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 5848 .addReg(undef) 5849 .addReg(vallo) 5850 .addImm(ARM::gsub_0); 5851 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), GPRPair2) 5852 .addReg(r1) 5853 .addReg(valhi) 5854 .addImm(ARM::gsub_1); 5855 } 5856 5857 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5858 .addReg(GPRPair0, RegState::Define).addReg(ptr)); 5859 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5860 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo) 5861 .addReg(GPRPair0, 0, ARM::gsub_0); 5862 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi) 5863 .addReg(GPRPair0, 0, ARM::gsub_1); 5864 5865 if (IsCmpxchg) { 5866 // Add early exit 5867 for (unsigned i = 0; i < 2; i++) { 5868 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5869 ARM::CMPrr)) 5870 .addReg(i == 0 ? destlo : desthi) 5871 .addReg(i == 0 ? vallo : valhi)); 5872 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5873 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5874 BB->addSuccessor(exitMBB); 5875 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5876 BB = (i == 0 ? contBB : cont2BB); 5877 } 5878 5879 // Copy to physregs for strexd 5880 unsigned setlo = MI->getOperand(5).getReg(); 5881 unsigned sethi = MI->getOperand(6).getReg(); 5882 unsigned undef = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5883 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5884 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), undef); 5885 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 5886 .addReg(undef) 5887 .addReg(setlo) 5888 .addImm(ARM::gsub_0); 5889 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), GPRPair1) 5890 .addReg(r1) 5891 .addReg(sethi) 5892 .addImm(ARM::gsub_1); 5893 } else if (Op1) { 5894 // Perform binary operation 5895 unsigned tmpRegLo = MRI.createVirtualRegister(TRC); 5896 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), tmpRegLo) 5897 .addReg(destlo).addReg(vallo)) 5898 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5899 unsigned tmpRegHi = MRI.createVirtualRegister(TRC); 5900 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), tmpRegHi) 5901 .addReg(desthi).addReg(valhi)) 5902 .addReg(IsMinMax ? 
ARM::CPSR : 0, getDefRegState(IsMinMax)); 5903 5904 unsigned UndefPair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5905 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), UndefPair); 5906 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5907 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 5908 .addReg(UndefPair) 5909 .addReg(tmpRegLo) 5910 .addImm(ARM::gsub_0); 5911 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), GPRPair1) 5912 .addReg(r1) 5913 .addReg(tmpRegHi) 5914 .addImm(ARM::gsub_1); 5915 } else { 5916 // Copy to physregs for strexd 5917 unsigned UndefPair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5918 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 5919 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), UndefPair); 5920 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 5921 .addReg(UndefPair) 5922 .addReg(vallo) 5923 .addImm(ARM::gsub_0); 5924 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), GPRPair1) 5925 .addReg(r1) 5926 .addReg(valhi) 5927 .addImm(ARM::gsub_1); 5928 } 5929 unsigned GPRPairStore = GPRPair1; 5930 if (IsMinMax) { 5931 // Compare and branch to exit block. 5932 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5933 .addMBB(exitMBB).addImm(CC).addReg(ARM::CPSR); 5934 BB->addSuccessor(exitMBB); 5935 BB->addSuccessor(contBB); 5936 BB = contBB; 5937 GPRPairStore = GPRPair2; 5938 } 5939 5940 // Store 5941 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5942 .addReg(GPRPairStore).addReg(ptr)); 5943 // Cmp+jump 5944 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5945 .addReg(storesuccess).addImm(0)); 5946 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5947 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5948 5949 BB->addSuccessor(loopMBB); 5950 BB->addSuccessor(exitMBB); 5951 5952 // exitMBB: 5953 // ... 5954 BB = exitMBB; 5955 5956 MI->eraseFromParent(); // The instruction is gone now. 5957 5958 return BB; 5959} 5960 5961/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5962/// registers the function context. 5963void ARMTargetLowering:: 5964SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5965 MachineBasicBlock *DispatchBB, int FI) const { 5966 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5967 DebugLoc dl = MI->getDebugLoc(); 5968 MachineFunction *MF = MBB->getParent(); 5969 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5970 MachineConstantPool *MCP = MF->getConstantPool(); 5971 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5972 const Function *F = MF->getFunction(); 5973 5974 bool isThumb = Subtarget->isThumb(); 5975 bool isThumb2 = Subtarget->isThumb2(); 5976 5977 unsigned PCLabelId = AFI->createPICLabelUId(); 5978 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5979 ARMConstantPoolValue *CPV = 5980 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5981 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5982 5983 const TargetRegisterClass *TRC = isThumb ? 5984 (const TargetRegisterClass*)&ARM::tGPRRegClass : 5985 (const TargetRegisterClass*)&ARM::GPRRegClass; 5986 5987 // Grab constant pool and fixed stack memory operands. 
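  // Both operands describe 4-byte, 4-aligned accesses: CPMMO covers the
  // PC-relative load of DispatchBB's address from the constant pool, and
  // FIMMOSt covers the store of that address into the function context at
  // frame index FI. All three code paths below write it to [FI + 36], the
  // slot annotated as &jbuf[1] (the saved pc), so the sjlj runtime's
  // longjmp-style resume into this frame lands in the dispatch block.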
5988 MachineMemOperand *CPMMO = 5989 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5990 MachineMemOperand::MOLoad, 4, 4); 5991 5992 MachineMemOperand *FIMMOSt = 5993 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5994 MachineMemOperand::MOStore, 4, 4); 5995 5996 // Load the address of the dispatch MBB into the jump buffer. 5997 if (isThumb2) { 5998 // Incoming value: jbuf 5999 // ldr.n r5, LCPI1_1 6000 // orr r5, r5, #1 6001 // add r5, pc 6002 // str r5, [$jbuf, #+4] ; &jbuf[1] 6003 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6004 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 6005 .addConstantPoolIndex(CPI) 6006 .addMemOperand(CPMMO)); 6007 // Set the low bit because of thumb mode. 6008 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6009 AddDefaultCC( 6010 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 6011 .addReg(NewVReg1, RegState::Kill) 6012 .addImm(0x01))); 6013 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6014 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 6015 .addReg(NewVReg2, RegState::Kill) 6016 .addImm(PCLabelId); 6017 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 6018 .addReg(NewVReg3, RegState::Kill) 6019 .addFrameIndex(FI) 6020 .addImm(36) // &jbuf[1] :: pc 6021 .addMemOperand(FIMMOSt)); 6022 } else if (isThumb) { 6023 // Incoming value: jbuf 6024 // ldr.n r1, LCPI1_4 6025 // add r1, pc 6026 // mov r2, #1 6027 // orrs r1, r2 6028 // add r2, $jbuf, #+4 ; &jbuf[1] 6029 // str r1, [r2] 6030 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6031 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 6032 .addConstantPoolIndex(CPI) 6033 .addMemOperand(CPMMO)); 6034 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6035 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 6036 .addReg(NewVReg1, RegState::Kill) 6037 .addImm(PCLabelId); 6038 // Set the low bit because of thumb mode. 
6039 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6040 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 6041 .addReg(ARM::CPSR, RegState::Define) 6042 .addImm(1)); 6043 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6044 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 6045 .addReg(ARM::CPSR, RegState::Define) 6046 .addReg(NewVReg2, RegState::Kill) 6047 .addReg(NewVReg3, RegState::Kill)); 6048 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6049 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 6050 .addFrameIndex(FI) 6051 .addImm(36)); // &jbuf[1] :: pc 6052 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 6053 .addReg(NewVReg4, RegState::Kill) 6054 .addReg(NewVReg5, RegState::Kill) 6055 .addImm(0) 6056 .addMemOperand(FIMMOSt)); 6057 } else { 6058 // Incoming value: jbuf 6059 // ldr r1, LCPI1_1 6060 // add r1, pc, r1 6061 // str r1, [$jbuf, #+4] ; &jbuf[1] 6062 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6063 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 6064 .addConstantPoolIndex(CPI) 6065 .addImm(0) 6066 .addMemOperand(CPMMO)); 6067 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6068 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 6069 .addReg(NewVReg1, RegState::Kill) 6070 .addImm(PCLabelId)); 6071 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 6072 .addReg(NewVReg2, RegState::Kill) 6073 .addFrameIndex(FI) 6074 .addImm(36) // &jbuf[1] :: pc 6075 .addMemOperand(FIMMOSt)); 6076 } 6077} 6078 6079MachineBasicBlock *ARMTargetLowering:: 6080EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 6081 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6082 DebugLoc dl = MI->getDebugLoc(); 6083 MachineFunction *MF = MBB->getParent(); 6084 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6085 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6086 MachineFrameInfo *MFI = MF->getFrameInfo(); 6087 int FI = MFI->getFunctionContextIndex(); 6088 6089 const TargetRegisterClass *TRC = Subtarget->isThumb() ? 6090 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6091 (const TargetRegisterClass*)&ARM::GPRnopcRegClass; 6092 6093 // Get a mapping of the call site numbers to all of the landing pads they're 6094 // associated with. 6095 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 6096 unsigned MaxCSNum = 0; 6097 MachineModuleInfo &MMI = MF->getMMI(); 6098 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 6099 ++BB) { 6100 if (!BB->isLandingPad()) continue; 6101 6102 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 6103 // pad. 6104 for (MachineBasicBlock::iterator 6105 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 6106 if (!II->isEHLabel()) continue; 6107 6108 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 6109 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 6110 6111 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 6112 for (SmallVectorImpl<unsigned>::iterator 6113 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 6114 CSI != CSE; ++CSI) { 6115 CallSiteNumToLPad[*CSI].push_back(BB); 6116 MaxCSNum = std::max(MaxCSNum, *CSI); 6117 } 6118 break; 6119 } 6120 } 6121 6122 // Get an ordered list of the machine basic blocks for the jump table. 
6123 std::vector<MachineBasicBlock*> LPadList; 6124 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 6125 LPadList.reserve(CallSiteNumToLPad.size()); 6126 for (unsigned I = 1; I <= MaxCSNum; ++I) { 6127 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 6128 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6129 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 6130 LPadList.push_back(*II); 6131 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 6132 } 6133 } 6134 6135 assert(!LPadList.empty() && 6136 "No landing pad destinations for the dispatch jump table!"); 6137 6138 // Create the jump table and associated information. 6139 MachineJumpTableInfo *JTI = 6140 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 6141 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 6142 unsigned UId = AFI->createJumpTableUId(); 6143 6144 // Create the MBBs for the dispatch code. 6145 6146 // Shove the dispatch's address into the return slot in the function context. 6147 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 6148 DispatchBB->setIsLandingPad(); 6149 6150 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 6151 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 6152 DispatchBB->addSuccessor(TrapBB); 6153 6154 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 6155 DispatchBB->addSuccessor(DispContBB); 6156 6157 // Insert and MBBs. 6158 MF->insert(MF->end(), DispatchBB); 6159 MF->insert(MF->end(), DispContBB); 6160 MF->insert(MF->end(), TrapBB); 6161 6162 // Insert code into the entry block that creates and registers the function 6163 // context. 6164 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 6165 6166 MachineMemOperand *FIMMOLd = 6167 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6168 MachineMemOperand::MOLoad | 6169 MachineMemOperand::MOVolatile, 4, 4); 6170 6171 MachineInstrBuilder MIB; 6172 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 6173 6174 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6175 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6176 6177 // Add a register mask with no preserved registers. This results in all 6178 // registers being marked as clobbered. 
6179 MIB.addRegMask(RI.getNoPreservedMask()); 6180 6181 unsigned NumLPads = LPadList.size(); 6182 if (Subtarget->isThumb2()) { 6183 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6184 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 6185 .addFrameIndex(FI) 6186 .addImm(4) 6187 .addMemOperand(FIMMOLd)); 6188 6189 if (NumLPads < 256) { 6190 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 6191 .addReg(NewVReg1) 6192 .addImm(LPadList.size())); 6193 } else { 6194 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6195 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 6196 .addImm(NumLPads & 0xFFFF)); 6197 6198 unsigned VReg2 = VReg1; 6199 if ((NumLPads & 0xFFFF0000) != 0) { 6200 VReg2 = MRI->createVirtualRegister(TRC); 6201 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 6202 .addReg(VReg1) 6203 .addImm(NumLPads >> 16)); 6204 } 6205 6206 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 6207 .addReg(NewVReg1) 6208 .addReg(VReg2)); 6209 } 6210 6211 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 6212 .addMBB(TrapBB) 6213 .addImm(ARMCC::HI) 6214 .addReg(ARM::CPSR); 6215 6216 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6217 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 6218 .addJumpTableIndex(MJTI) 6219 .addImm(UId)); 6220 6221 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6222 AddDefaultCC( 6223 AddDefaultPred( 6224 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 6225 .addReg(NewVReg3, RegState::Kill) 6226 .addReg(NewVReg1) 6227 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6228 6229 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 6230 .addReg(NewVReg4, RegState::Kill) 6231 .addReg(NewVReg1) 6232 .addJumpTableIndex(MJTI) 6233 .addImm(UId); 6234 } else if (Subtarget->isThumb()) { 6235 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6236 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 6237 .addFrameIndex(FI) 6238 .addImm(1) 6239 .addMemOperand(FIMMOLd)); 6240 6241 if (NumLPads < 256) { 6242 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 6243 .addReg(NewVReg1) 6244 .addImm(NumLPads)); 6245 } else { 6246 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6247 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6248 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6249 6250 // MachineConstantPool wants an explicit alignment. 
6251 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6252 if (Align == 0) 6253 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6254 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6255 6256 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6257 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 6258 .addReg(VReg1, RegState::Define) 6259 .addConstantPoolIndex(Idx)); 6260 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 6261 .addReg(NewVReg1) 6262 .addReg(VReg1)); 6263 } 6264 6265 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 6266 .addMBB(TrapBB) 6267 .addImm(ARMCC::HI) 6268 .addReg(ARM::CPSR); 6269 6270 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6271 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 6272 .addReg(ARM::CPSR, RegState::Define) 6273 .addReg(NewVReg1) 6274 .addImm(2)); 6275 6276 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6277 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 6278 .addJumpTableIndex(MJTI) 6279 .addImm(UId)); 6280 6281 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6282 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 6283 .addReg(ARM::CPSR, RegState::Define) 6284 .addReg(NewVReg2, RegState::Kill) 6285 .addReg(NewVReg3)); 6286 6287 MachineMemOperand *JTMMOLd = 6288 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6289 MachineMemOperand::MOLoad, 4, 4); 6290 6291 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6292 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 6293 .addReg(NewVReg4, RegState::Kill) 6294 .addImm(0) 6295 .addMemOperand(JTMMOLd)); 6296 6297 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 6298 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 6299 .addReg(ARM::CPSR, RegState::Define) 6300 .addReg(NewVReg5, RegState::Kill) 6301 .addReg(NewVReg3)); 6302 6303 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 6304 .addReg(NewVReg6, RegState::Kill) 6305 .addJumpTableIndex(MJTI) 6306 .addImm(UId); 6307 } else { 6308 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6309 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 6310 .addFrameIndex(FI) 6311 .addImm(4) 6312 .addMemOperand(FIMMOLd)); 6313 6314 if (NumLPads < 256) { 6315 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 6316 .addReg(NewVReg1) 6317 .addImm(NumLPads)); 6318 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 6319 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6320 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 6321 .addImm(NumLPads & 0xFFFF)); 6322 6323 unsigned VReg2 = VReg1; 6324 if ((NumLPads & 0xFFFF0000) != 0) { 6325 VReg2 = MRI->createVirtualRegister(TRC); 6326 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 6327 .addReg(VReg1) 6328 .addImm(NumLPads >> 16)); 6329 } 6330 6331 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6332 .addReg(NewVReg1) 6333 .addReg(VReg2)); 6334 } else { 6335 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6336 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6337 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6338 6339 // MachineConstantPool wants an explicit alignment. 
6340 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6341 if (Align == 0) 6342 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6343 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6344 6345 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6346 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 6347 .addReg(VReg1, RegState::Define) 6348 .addConstantPoolIndex(Idx) 6349 .addImm(0)); 6350 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6351 .addReg(NewVReg1) 6352 .addReg(VReg1, RegState::Kill)); 6353 } 6354 6355 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 6356 .addMBB(TrapBB) 6357 .addImm(ARMCC::HI) 6358 .addReg(ARM::CPSR); 6359 6360 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6361 AddDefaultCC( 6362 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 6363 .addReg(NewVReg1) 6364 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6365 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6366 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 6367 .addJumpTableIndex(MJTI) 6368 .addImm(UId)); 6369 6370 MachineMemOperand *JTMMOLd = 6371 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6372 MachineMemOperand::MOLoad, 4, 4); 6373 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6374 AddDefaultPred( 6375 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 6376 .addReg(NewVReg3, RegState::Kill) 6377 .addReg(NewVReg4) 6378 .addImm(0) 6379 .addMemOperand(JTMMOLd)); 6380 6381 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 6382 .addReg(NewVReg5, RegState::Kill) 6383 .addReg(NewVReg4) 6384 .addJumpTableIndex(MJTI) 6385 .addImm(UId); 6386 } 6387 6388 // Add the jump table entries as successors to the MBB. 6389 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 6390 for (std::vector<MachineBasicBlock*>::iterator 6391 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 6392 MachineBasicBlock *CurMBB = *I; 6393 if (SeenMBBs.insert(CurMBB)) 6394 DispContBB->addSuccessor(CurMBB); 6395 } 6396 6397 // N.B. the order the invoke BBs are processed in doesn't matter here. 6398 const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF); 6399 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6400 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6401 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6402 MachineBasicBlock *BB = *I; 6403 6404 // Remove the landing pad successor from the invoke block and replace it 6405 // with the new dispatch block. 6406 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6407 BB->succ_end()); 6408 while (!Successors.empty()) { 6409 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6410 if (SMBB->isLandingPad()) { 6411 BB->removeSuccessor(SMBB); 6412 MBBLPads.push_back(SMBB); 6413 } 6414 } 6415 6416 BB->addSuccessor(DispatchBB); 6417 6418 // Find the invoke call and mark all of the callee-saved registers as 6419 // 'implicit defined' so that they're spilled. This prevents code from 6420 // moving instructions to before the EH block, where they will never be 6421 // executed. 
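    // Walking the invoke block in reverse finds the call (the invoke itself)
    // first. Each callee-saved register that is legal for the current mode
    // (tGPR for Thumb1, tGPR/hGPR for Thumb2, GPR for ARM) and not already
    // defined by the call is added as an implicit, dead def, which is enough
    // to make it look clobbered and keep live values in stack slots.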
6422 for (MachineBasicBlock::reverse_iterator 6423 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6424 if (!II->isCall()) continue; 6425 6426 DenseMap<unsigned, bool> DefRegs; 6427 for (MachineInstr::mop_iterator 6428 OI = II->operands_begin(), OE = II->operands_end(); 6429 OI != OE; ++OI) { 6430 if (!OI->isReg()) continue; 6431 DefRegs[OI->getReg()] = true; 6432 } 6433 6434 MachineInstrBuilder MIB(&*II); 6435 6436 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6437 unsigned Reg = SavedRegs[i]; 6438 if (Subtarget->isThumb2() && 6439 !ARM::tGPRRegClass.contains(Reg) && 6440 !ARM::hGPRRegClass.contains(Reg)) 6441 continue; 6442 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 6443 continue; 6444 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 6445 continue; 6446 if (!DefRegs[Reg]) 6447 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6448 } 6449 6450 break; 6451 } 6452 } 6453 6454 // Mark all former landing pads as non-landing pads. The dispatch is the only 6455 // landing pad now. 6456 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6457 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6458 (*I)->setIsLandingPad(false); 6459 6460 // The instruction is gone now. 6461 MI->eraseFromParent(); 6462 6463 return MBB; 6464} 6465 6466static 6467MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6468 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6469 E = MBB->succ_end(); I != E; ++I) 6470 if (*I != Succ) 6471 return *I; 6472 llvm_unreachable("Expecting a BB with two successors!"); 6473} 6474 6475MachineBasicBlock *ARMTargetLowering:: 6476EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { 6477 // This pseudo instruction has 3 operands: dst, src, size 6478 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 6479 // Otherwise, we will generate unrolled scalar copies. 6480 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6481 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6482 MachineFunction::iterator It = BB; 6483 ++It; 6484 6485 unsigned dest = MI->getOperand(0).getReg(); 6486 unsigned src = MI->getOperand(1).getReg(); 6487 unsigned SizeVal = MI->getOperand(2).getImm(); 6488 unsigned Align = MI->getOperand(3).getImm(); 6489 DebugLoc dl = MI->getDebugLoc(); 6490 6491 bool isThumb2 = Subtarget->isThumb2(); 6492 MachineFunction *MF = BB->getParent(); 6493 MachineRegisterInfo &MRI = MF->getRegInfo(); 6494 unsigned ldrOpc, strOpc, UnitSize = 0; 6495 6496 const TargetRegisterClass *TRC = isThumb2 ? 6497 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6498 (const TargetRegisterClass*)&ARM::GPRRegClass; 6499 const TargetRegisterClass *TRC_Vec = 0; 6500 6501 if (Align & 1) { 6502 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6503 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6504 UnitSize = 1; 6505 } else if (Align & 2) { 6506 ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST; 6507 strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST; 6508 UnitSize = 2; 6509 } else { 6510 // Check whether we can use NEON instructions. 6511 if (!MF->getFunction()->getFnAttributes(). 
6512 hasAttribute(Attributes::NoImplicitFloat) && 6513 Subtarget->hasNEON()) { 6514 if ((Align % 16 == 0) && SizeVal >= 16) { 6515 ldrOpc = ARM::VLD1q32wb_fixed; 6516 strOpc = ARM::VST1q32wb_fixed; 6517 UnitSize = 16; 6518 TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass; 6519 } 6520 else if ((Align % 8 == 0) && SizeVal >= 8) { 6521 ldrOpc = ARM::VLD1d32wb_fixed; 6522 strOpc = ARM::VST1d32wb_fixed; 6523 UnitSize = 8; 6524 TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass; 6525 } 6526 } 6527 // Can't use NEON instructions. 6528 if (UnitSize == 0) { 6529 ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 6530 strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM; 6531 UnitSize = 4; 6532 } 6533 } 6534 6535 unsigned BytesLeft = SizeVal % UnitSize; 6536 unsigned LoopSize = SizeVal - BytesLeft; 6537 6538 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 6539 // Use LDR and STR to copy. 6540 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 6541 // [destOut] = STR_POST(scratch, destIn, UnitSize) 6542 unsigned srcIn = src; 6543 unsigned destIn = dest; 6544 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 6545 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC); 6546 unsigned srcOut = MRI.createVirtualRegister(TRC); 6547 unsigned destOut = MRI.createVirtualRegister(TRC); 6548 if (UnitSize >= 8) { 6549 AddDefaultPred(BuildMI(*BB, MI, dl, 6550 TII->get(ldrOpc), scratch) 6551 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0)); 6552 6553 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6554 .addReg(destIn).addImm(0).addReg(scratch)); 6555 } else if (isThumb2) { 6556 AddDefaultPred(BuildMI(*BB, MI, dl, 6557 TII->get(ldrOpc), scratch) 6558 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize)); 6559 6560 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6561 .addReg(scratch).addReg(destIn) 6562 .addImm(UnitSize)); 6563 } else { 6564 AddDefaultPred(BuildMI(*BB, MI, dl, 6565 TII->get(ldrOpc), scratch) 6566 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0) 6567 .addImm(UnitSize)); 6568 6569 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6570 .addReg(scratch).addReg(destIn) 6571 .addReg(0).addImm(UnitSize)); 6572 } 6573 srcIn = srcOut; 6574 destIn = destOut; 6575 } 6576 6577 // Handle the leftover bytes with LDRB and STRB. 6578 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 6579 // [destOut] = STRB_POST(scratch, destIn, 1) 6580 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6581 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6582 for (unsigned i = 0; i < BytesLeft; i++) { 6583 unsigned scratch = MRI.createVirtualRegister(TRC); 6584 unsigned srcOut = MRI.createVirtualRegister(TRC); 6585 unsigned destOut = MRI.createVirtualRegister(TRC); 6586 if (isThumb2) { 6587 AddDefaultPred(BuildMI(*BB, MI, dl, 6588 TII->get(ldrOpc),scratch) 6589 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 6590 6591 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6592 .addReg(scratch).addReg(destIn) 6593 .addReg(0).addImm(1)); 6594 } else { 6595 AddDefaultPred(BuildMI(*BB, MI, dl, 6596 TII->get(ldrOpc),scratch) 6597 .addReg(srcOut, RegState::Define).addReg(srcIn) 6598 .addReg(0).addImm(1)); 6599 6600 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6601 .addReg(scratch).addReg(destIn) 6602 .addReg(0).addImm(1)); 6603 } 6604 srcIn = srcOut; 6605 destIn = destOut; 6606 } 6607 MI->eraseFromParent(); // The instruction is gone now. 
6608 return BB; 6609 } 6610 6611 // Expand the pseudo op to a loop. 6612 // thisMBB: 6613 // ... 6614 // movw varEnd, # --> with thumb2 6615 // movt varEnd, # 6616 // ldrcp varEnd, idx --> without thumb2 6617 // fallthrough --> loopMBB 6618 // loopMBB: 6619 // PHI varPhi, varEnd, varLoop 6620 // PHI srcPhi, src, srcLoop 6621 // PHI destPhi, dst, destLoop 6622 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 6623 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 6624 // subs varLoop, varPhi, #UnitSize 6625 // bne loopMBB 6626 // fallthrough --> exitMBB 6627 // exitMBB: 6628 // epilogue to handle left-over bytes 6629 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 6630 // [destOut] = STRB_POST(scratch, destLoop, 1) 6631 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6632 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6633 MF->insert(It, loopMBB); 6634 MF->insert(It, exitMBB); 6635 6636 // Transfer the remainder of BB and its successor edges to exitMBB. 6637 exitMBB->splice(exitMBB->begin(), BB, 6638 llvm::next(MachineBasicBlock::iterator(MI)), 6639 BB->end()); 6640 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6641 6642 // Load an immediate to varEnd. 6643 unsigned varEnd = MRI.createVirtualRegister(TRC); 6644 if (isThumb2) { 6645 unsigned VReg1 = varEnd; 6646 if ((LoopSize & 0xFFFF0000) != 0) 6647 VReg1 = MRI.createVirtualRegister(TRC); 6648 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1) 6649 .addImm(LoopSize & 0xFFFF)); 6650 6651 if ((LoopSize & 0xFFFF0000) != 0) 6652 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd) 6653 .addReg(VReg1) 6654 .addImm(LoopSize >> 16)); 6655 } else { 6656 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6657 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6658 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 6659 6660 // MachineConstantPool wants an explicit alignment. 6661 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6662 if (Align == 0) 6663 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6664 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6665 6666 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp)) 6667 .addReg(varEnd, RegState::Define) 6668 .addConstantPoolIndex(Idx) 6669 .addImm(0)); 6670 } 6671 BB->addSuccessor(loopMBB); 6672 6673 // Generate the loop body: 6674 // varPhi = PHI(varLoop, varEnd) 6675 // srcPhi = PHI(srcLoop, src) 6676 // destPhi = PHI(destLoop, dst) 6677 MachineBasicBlock *entryBB = BB; 6678 BB = loopMBB; 6679 unsigned varLoop = MRI.createVirtualRegister(TRC); 6680 unsigned varPhi = MRI.createVirtualRegister(TRC); 6681 unsigned srcLoop = MRI.createVirtualRegister(TRC); 6682 unsigned srcPhi = MRI.createVirtualRegister(TRC); 6683 unsigned destLoop = MRI.createVirtualRegister(TRC); 6684 unsigned destPhi = MRI.createVirtualRegister(TRC); 6685 6686 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) 6687 .addReg(varLoop).addMBB(loopMBB) 6688 .addReg(varEnd).addMBB(entryBB); 6689 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) 6690 .addReg(srcLoop).addMBB(loopMBB) 6691 .addReg(src).addMBB(entryBB); 6692 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) 6693 .addReg(destLoop).addMBB(loopMBB) 6694 .addReg(dest).addMBB(entryBB); 6695 6696 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 6697 // [destLoop] = STR_POST(scratch, destPhi, UnitSiz) 6698 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? 
TRC_Vec:TRC); 6699 if (UnitSize >= 8) { 6700 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6701 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0)); 6702 6703 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6704 .addReg(destPhi).addImm(0).addReg(scratch)); 6705 } else if (isThumb2) { 6706 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6707 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize)); 6708 6709 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6710 .addReg(scratch).addReg(destPhi) 6711 .addImm(UnitSize)); 6712 } else { 6713 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 6714 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0) 6715 .addImm(UnitSize)); 6716 6717 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 6718 .addReg(scratch).addReg(destPhi) 6719 .addReg(0).addImm(UnitSize)); 6720 } 6721 6722 // Decrement loop variable by UnitSize. 6723 MachineInstrBuilder MIB = BuildMI(BB, dl, 6724 TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); 6725 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize))); 6726 MIB->getOperand(5).setReg(ARM::CPSR); 6727 MIB->getOperand(5).setIsDef(true); 6728 6729 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6730 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6731 6732 // loopMBB can loop back to loopMBB or fall through to exitMBB. 6733 BB->addSuccessor(loopMBB); 6734 BB->addSuccessor(exitMBB); 6735 6736 // Add epilogue to handle BytesLeft. 6737 BB = exitMBB; 6738 MachineInstr *StartOfExit = exitMBB->begin(); 6739 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6740 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6741 6742 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 6743 // [destOut] = STRB_POST(scratch, destLoop, 1) 6744 unsigned srcIn = srcLoop; 6745 unsigned destIn = destLoop; 6746 for (unsigned i = 0; i < BytesLeft; i++) { 6747 unsigned scratch = MRI.createVirtualRegister(TRC); 6748 unsigned srcOut = MRI.createVirtualRegister(TRC); 6749 unsigned destOut = MRI.createVirtualRegister(TRC); 6750 if (isThumb2) { 6751 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 6752 TII->get(ldrOpc),scratch) 6753 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 6754 6755 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 6756 .addReg(scratch).addReg(destIn) 6757 .addImm(1)); 6758 } else { 6759 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 6760 TII->get(ldrOpc),scratch) 6761 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1)); 6762 6763 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 6764 .addReg(scratch).addReg(destIn) 6765 .addReg(0).addImm(1)); 6766 } 6767 srcIn = srcOut; 6768 destIn = destOut; 6769 } 6770 6771 MI->eraseFromParent(); // The instruction is gone now. 6772 return BB; 6773} 6774 6775MachineBasicBlock * 6776ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6777 MachineBasicBlock *BB) const { 6778 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6779 DebugLoc dl = MI->getDebugLoc(); 6780 bool isThumb2 = Subtarget->isThumb2(); 6781 switch (MI->getOpcode()) { 6782 default: { 6783 MI->dump(); 6784 llvm_unreachable("Unexpected instr type to insert"); 6785 } 6786 // The Thumb2 pre-indexed stores have the same MI operands, they just 6787 // define them differently in the .td files from the isel patterns, so 6788 // they need pseudos. 
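  // For these the expansion is just a descriptor swap: setDesc() rewrites the
  // pseudo to the corresponding *_PRE instruction in place and the operands
  // are left untouched, e.g. t2STR_preidx simply becomes t2STR_PRE.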
6789 case ARM::t2STR_preidx: 6790 MI->setDesc(TII->get(ARM::t2STR_PRE)); 6791 return BB; 6792 case ARM::t2STRB_preidx: 6793 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 6794 return BB; 6795 case ARM::t2STRH_preidx: 6796 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 6797 return BB; 6798 6799 case ARM::STRi_preidx: 6800 case ARM::STRBi_preidx: { 6801 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 6802 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 6803 // Decode the offset. 6804 unsigned Offset = MI->getOperand(4).getImm(); 6805 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 6806 Offset = ARM_AM::getAM2Offset(Offset); 6807 if (isSub) 6808 Offset = -Offset; 6809 6810 MachineMemOperand *MMO = *MI->memoperands_begin(); 6811 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 6812 .addOperand(MI->getOperand(0)) // Rn_wb 6813 .addOperand(MI->getOperand(1)) // Rt 6814 .addOperand(MI->getOperand(2)) // Rn 6815 .addImm(Offset) // offset (skip GPR==zero_reg) 6816 .addOperand(MI->getOperand(5)) // pred 6817 .addOperand(MI->getOperand(6)) 6818 .addMemOperand(MMO); 6819 MI->eraseFromParent(); 6820 return BB; 6821 } 6822 case ARM::STRr_preidx: 6823 case ARM::STRBr_preidx: 6824 case ARM::STRH_preidx: { 6825 unsigned NewOpc; 6826 switch (MI->getOpcode()) { 6827 default: llvm_unreachable("unexpected opcode!"); 6828 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 6829 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 6830 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 6831 } 6832 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 6833 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 6834 MIB.addOperand(MI->getOperand(i)); 6835 MI->eraseFromParent(); 6836 return BB; 6837 } 6838 case ARM::ATOMIC_LOAD_ADD_I8: 6839 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6840 case ARM::ATOMIC_LOAD_ADD_I16: 6841 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6842 case ARM::ATOMIC_LOAD_ADD_I32: 6843 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6844 6845 case ARM::ATOMIC_LOAD_AND_I8: 6846 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6847 case ARM::ATOMIC_LOAD_AND_I16: 6848 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6849 case ARM::ATOMIC_LOAD_AND_I32: 6850 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6851 6852 case ARM::ATOMIC_LOAD_OR_I8: 6853 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6854 case ARM::ATOMIC_LOAD_OR_I16: 6855 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6856 case ARM::ATOMIC_LOAD_OR_I32: 6857 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6858 6859 case ARM::ATOMIC_LOAD_XOR_I8: 6860 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6861 case ARM::ATOMIC_LOAD_XOR_I16: 6862 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6863 case ARM::ATOMIC_LOAD_XOR_I32: 6864 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6865 6866 case ARM::ATOMIC_LOAD_NAND_I8: 6867 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6868 case ARM::ATOMIC_LOAD_NAND_I16: 6869 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6870 case ARM::ATOMIC_LOAD_NAND_I32: 6871 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6872 6873 case ARM::ATOMIC_LOAD_SUB_I8: 6874 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 6875 case ARM::ATOMIC_LOAD_SUB_I16: 6876 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6877 case ARM::ATOMIC_LOAD_SUB_I32: 6878 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6879 6880 case ARM::ATOMIC_LOAD_MIN_I8: 6881 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 6882 case ARM::ATOMIC_LOAD_MIN_I16: 6883 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 6884 case ARM::ATOMIC_LOAD_MIN_I32: 6885 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 6886 6887 case ARM::ATOMIC_LOAD_MAX_I8: 6888 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 6889 case ARM::ATOMIC_LOAD_MAX_I16: 6890 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 6891 case ARM::ATOMIC_LOAD_MAX_I32: 6892 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 6893 6894 case ARM::ATOMIC_LOAD_UMIN_I8: 6895 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 6896 case ARM::ATOMIC_LOAD_UMIN_I16: 6897 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 6898 case ARM::ATOMIC_LOAD_UMIN_I32: 6899 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 6900 6901 case ARM::ATOMIC_LOAD_UMAX_I8: 6902 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 6903 case ARM::ATOMIC_LOAD_UMAX_I16: 6904 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 6905 case ARM::ATOMIC_LOAD_UMAX_I32: 6906 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 6907 6908 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 6909 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 6910 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 6911 6912 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 6913 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 6914 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 6915 6916 6917 case ARM::ATOMADD6432: 6918 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 6919 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 6920 /*NeedsCarry*/ true); 6921 case ARM::ATOMSUB6432: 6922 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6923 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6924 /*NeedsCarry*/ true); 6925 case ARM::ATOMOR6432: 6926 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 6927 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6928 case ARM::ATOMXOR6432: 6929 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 6930 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6931 case ARM::ATOMAND6432: 6932 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 6933 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6934 case ARM::ATOMSWAP6432: 6935 return EmitAtomicBinary64(MI, BB, 0, 0, false); 6936 case ARM::ATOMCMPXCHG6432: 6937 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6938 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6939 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6940 case ARM::ATOMMIN6432: 6941 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6942 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6943 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 6944 /*IsMinMax*/ true, ARMCC::LE); 6945 case ARM::ATOMMAX6432: 6946 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6947 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6948 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 6949 /*IsMinMax*/ true, ARMCC::GE); 6950 case ARM::ATOMUMIN6432: 6951 return EmitAtomicBinary64(MI, BB, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr, 6952 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6953 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 6954 /*IsMinMax*/ true, ARMCC::LS); 6955 case ARM::ATOMUMAX6432: 6956 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6957 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6958 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 6959 /*IsMinMax*/ true, ARMCC::HS); 6960 6961 case ARM::tMOVCCr_pseudo: { 6962 // To "insert" a SELECT_CC instruction, we actually have to insert the 6963 // diamond control-flow pattern. The incoming instruction knows the 6964 // destination vreg to set, the condition code register to branch on, the 6965 // true/false values to select between, and a branch opcode to use. 6966 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6967 MachineFunction::iterator It = BB; 6968 ++It; 6969 6970 // thisMBB: 6971 // ... 6972 // TrueVal = ... 6973 // cmpTY ccX, r1, r2 6974 // bCC copy1MBB 6975 // fallthrough --> copy0MBB 6976 MachineBasicBlock *thisMBB = BB; 6977 MachineFunction *F = BB->getParent(); 6978 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6979 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6980 F->insert(It, copy0MBB); 6981 F->insert(It, sinkMBB); 6982 6983 // Transfer the remainder of BB and its successor edges to sinkMBB. 6984 sinkMBB->splice(sinkMBB->begin(), BB, 6985 llvm::next(MachineBasicBlock::iterator(MI)), 6986 BB->end()); 6987 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6988 6989 BB->addSuccessor(copy0MBB); 6990 BB->addSuccessor(sinkMBB); 6991 6992 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6993 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6994 6995 // copy0MBB: 6996 // %FalseValue = ... 6997 // # fallthrough to sinkMBB 6998 BB = copy0MBB; 6999 7000 // Update machine-CFG edges 7001 BB->addSuccessor(sinkMBB); 7002 7003 // sinkMBB: 7004 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 7005 // ... 7006 BB = sinkMBB; 7007 BuildMI(*BB, BB->begin(), dl, 7008 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 7009 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 7010 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 7011 7012 MI->eraseFromParent(); // The pseudo instruction is gone now. 7013 return BB; 7014 } 7015 7016 case ARM::BCCi64: 7017 case ARM::BCCZi64: { 7018 // If there is an unconditional branch to the other successor, remove it. 7019 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 7020 7021 // Compare both parts that make up the double comparison separately for 7022 // equality. 7023 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 7024 7025 unsigned LHS1 = MI->getOperand(1).getReg(); 7026 unsigned LHS2 = MI->getOperand(2).getReg(); 7027 if (RHSisZero) { 7028 AddDefaultPred(BuildMI(BB, dl, 7029 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7030 .addReg(LHS1).addImm(0)); 7031 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7032 .addReg(LHS2).addImm(0) 7033 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7034 } else { 7035 unsigned RHS1 = MI->getOperand(3).getReg(); 7036 unsigned RHS2 = MI->getOperand(4).getReg(); 7037 AddDefaultPred(BuildMI(BB, dl, 7038 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7039 .addReg(LHS1).addReg(RHS1)); 7040 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7041 .addReg(LHS2).addReg(RHS2) 7042 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7043 } 7044 7045 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 
3 : 5).getMBB(); 7046 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 7047 if (MI->getOperand(0).getImm() == ARMCC::NE) 7048 std::swap(destMBB, exitMBB); 7049 7050 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 7051 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 7052 if (isThumb2) 7053 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 7054 else 7055 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 7056 7057 MI->eraseFromParent(); // The pseudo instruction is gone now. 7058 return BB; 7059 } 7060 7061 case ARM::Int_eh_sjlj_setjmp: 7062 case ARM::Int_eh_sjlj_setjmp_nofp: 7063 case ARM::tInt_eh_sjlj_setjmp: 7064 case ARM::t2Int_eh_sjlj_setjmp: 7065 case ARM::t2Int_eh_sjlj_setjmp_nofp: 7066 EmitSjLjDispatchBlock(MI, BB); 7067 return BB; 7068 7069 case ARM::ABS: 7070 case ARM::t2ABS: { 7071 // To insert an ABS instruction, we have to insert the 7072 // diamond control-flow pattern. The incoming instruction knows the 7073 // source vreg to test against 0, the destination vreg to set, 7074 // the condition code register to branch on, the 7075 // true/false values to select between, and a branch opcode to use. 7076 // It transforms 7077 // V1 = ABS V0 7078 // into 7079 // V2 = MOVS V0 7080 // BCC (branch to SinkBB if V0 >= 0) 7081 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 7082 // SinkBB: V1 = PHI(V2, V3) 7083 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7084 MachineFunction::iterator BBI = BB; 7085 ++BBI; 7086 MachineFunction *Fn = BB->getParent(); 7087 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7088 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7089 Fn->insert(BBI, RSBBB); 7090 Fn->insert(BBI, SinkBB); 7091 7092 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 7093 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 7094 bool isThumb2 = Subtarget->isThumb2(); 7095 MachineRegisterInfo &MRI = Fn->getRegInfo(); 7096 // In Thumb mode S must not be specified if source register is the SP or 7097 // PC and if destination register is the SP, so restrict register class 7098 unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ? 7099 (const TargetRegisterClass*)&ARM::rGPRRegClass : 7100 (const TargetRegisterClass*)&ARM::GPRRegClass); 7101 7102 // Transfer the remainder of BB and its successor edges to sinkMBB. 7103 SinkBB->splice(SinkBB->begin(), BB, 7104 llvm::next(MachineBasicBlock::iterator(MI)), 7105 BB->end()); 7106 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 7107 7108 BB->addSuccessor(RSBBB); 7109 BB->addSuccessor(SinkBB); 7110 7111 // fall through to SinkMBB 7112 RSBBB->addSuccessor(SinkBB); 7113 7114 // insert a cmp at the end of BB 7115 AddDefaultPred(BuildMI(BB, dl, 7116 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7117 .addReg(ABSSrcReg).addImm(0)); 7118 7119 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 7120 BuildMI(BB, dl, 7121 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 7122 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 7123 7124 // insert rsbri in RSBBB 7125 // Note: BCC and rsbri will be converted into predicated rsbmi 7126 // by if-conversion pass 7127 BuildMI(*RSBBB, RSBBB->begin(), dl, 7128 TII->get(isThumb2 ? 
ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 7129 .addReg(ABSSrcReg, RegState::Kill) 7130 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 7131 7132 // insert PHI in SinkBB, 7133 // reuse ABSDstReg to not change uses of ABS instruction 7134 BuildMI(*SinkBB, SinkBB->begin(), dl, 7135 TII->get(ARM::PHI), ABSDstReg) 7136 .addReg(NewRsbDstReg).addMBB(RSBBB) 7137 .addReg(ABSSrcReg).addMBB(BB); 7138 7139 // remove ABS instruction 7140 MI->eraseFromParent(); 7141 7142 // return last added BB 7143 return SinkBB; 7144 } 7145 case ARM::COPY_STRUCT_BYVAL_I32: 7146 ++NumLoopByVals; 7147 return EmitStructByval(MI, BB); 7148 } 7149} 7150 7151void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 7152 SDNode *Node) const { 7153 if (!MI->hasPostISelHook()) { 7154 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 7155 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 7156 return; 7157 } 7158 7159 const MCInstrDesc *MCID = &MI->getDesc(); 7160 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 7161 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 7162 // operand is still set to noreg. If needed, set the optional operand's 7163 // register to CPSR, and remove the redundant implicit def. 7164 // 7165 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 7166 7167 // Rename pseudo opcodes. 7168 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 7169 if (NewOpc) { 7170 const ARMBaseInstrInfo *TII = 7171 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 7172 MCID = &TII->get(NewOpc); 7173 7174 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 7175 "converted opcode should be the same except for cc_out"); 7176 7177 MI->setDesc(*MCID); 7178 7179 // Add the optional cc_out operand 7180 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 7181 } 7182 unsigned ccOutIdx = MCID->getNumOperands() - 1; 7183 7184 // Any ARM instruction that sets the 's' bit should specify an optional 7185 // "cc_out" operand in the last operand position. 7186 if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 7187 assert(!NewOpc && "Optional cc_out operand required"); 7188 return; 7189 } 7190 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 7191 // since we already have an optional CPSR def. 7192 bool definesCPSR = false; 7193 bool deadCPSR = false; 7194 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 7195 i != e; ++i) { 7196 const MachineOperand &MO = MI->getOperand(i); 7197 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 7198 definesCPSR = true; 7199 if (MO.isDead()) 7200 deadCPSR = true; 7201 MI->RemoveOperand(i); 7202 break; 7203 } 7204 } 7205 if (!definesCPSR) { 7206 assert(!NewOpc && "Optional cc_out operand required"); 7207 return; 7208 } 7209 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 7210 if (deadCPSR) { 7211 assert(!MI->getOperand(ccOutIdx).getReg() && 7212 "expect uninitialized optional cc_out operand"); 7213 return; 7214 } 7215 7216 // If this instruction was defined with an optional CPSR def and its dag node 7217 // had a live implicit CPSR def, then activate the optional CPSR def. 
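  // For instance (an illustrative case, not the only one), a flag-setting add
  // selected as the ADDSri pseudo has by now been renamed to ADDri, its
  // implicit CPSR def stripped above, and the lines below point its trailing
  // cc_out operand at CPSR and mark it as a def, so the flag write is
  // expressed through the optional operand instead of an implicit one.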
7218 MachineOperand &MO = MI->getOperand(ccOutIdx); 7219 MO.setReg(ARM::CPSR); 7220 MO.setIsDef(true); 7221} 7222 7223//===----------------------------------------------------------------------===// 7224// ARM Optimization Hooks 7225//===----------------------------------------------------------------------===// 7226 7227// Helper function that checks if N is a null or all ones constant. 7228static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 7229 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N); 7230 if (!C) 7231 return false; 7232 return AllOnes ? C->isAllOnesValue() : C->isNullValue(); 7233} 7234 7235// Return true if N is conditionally 0 or all ones. 7236// Detects these expressions where cc is an i1 value: 7237// 7238// (select cc 0, y) [AllOnes=0] 7239// (select cc y, 0) [AllOnes=0] 7240// (zext cc) [AllOnes=0] 7241// (sext cc) [AllOnes=0/1] 7242// (select cc -1, y) [AllOnes=1] 7243// (select cc y, -1) [AllOnes=1] 7244// 7245// Invert is set when N is the null/all ones constant when CC is false. 7246// OtherOp is set to the alternative value of N. 7247static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 7248 SDValue &CC, bool &Invert, 7249 SDValue &OtherOp, 7250 SelectionDAG &DAG) { 7251 switch (N->getOpcode()) { 7252 default: return false; 7253 case ISD::SELECT: { 7254 CC = N->getOperand(0); 7255 SDValue N1 = N->getOperand(1); 7256 SDValue N2 = N->getOperand(2); 7257 if (isZeroOrAllOnes(N1, AllOnes)) { 7258 Invert = false; 7259 OtherOp = N2; 7260 return true; 7261 } 7262 if (isZeroOrAllOnes(N2, AllOnes)) { 7263 Invert = true; 7264 OtherOp = N1; 7265 return true; 7266 } 7267 return false; 7268 } 7269 case ISD::ZERO_EXTEND: 7270 // (zext cc) can never be the all ones value. 7271 if (AllOnes) 7272 return false; 7273 // Fall through. 7274 case ISD::SIGN_EXTEND: { 7275 EVT VT = N->getValueType(0); 7276 CC = N->getOperand(0); 7277 if (CC.getValueType() != MVT::i1) 7278 return false; 7279 Invert = !AllOnes; 7280 if (AllOnes) 7281 // When looking for an AllOnes constant, N is an sext, and the 'other' 7282 // value is 0. 7283 OtherOp = DAG.getConstant(0, VT); 7284 else if (N->getOpcode() == ISD::ZERO_EXTEND) 7285 // When looking for a 0 constant, N can be zext or sext. 7286 OtherOp = DAG.getConstant(1, VT); 7287 else 7288 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); 7289 return true; 7290 } 7291 } 7292} 7293 7294// Combine a constant select operand into its use: 7295// 7296// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7297// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 7298// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 7299// (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 7300// (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 7301// 7302// The transform is rejected if the select doesn't have a constant operand that 7303// is null, or all ones when AllOnes is set. 7304// 7305// Also recognize sext/zext from i1: 7306// 7307// (add (zext cc), x) -> (select cc (add x, 1), x) 7308// (add (sext cc), x) -> (select cc (add x, -1), x) 7309// 7310// These transformations eventually create predicated instructions. 7311// 7312// @param N The node to transform. 7313// @param Slct The N operand that is a select. 7314// @param OtherOp The other N operand (x above). 7315// @param DCI Context. 7316// @param AllOnes Require the select constant to be all ones instead of null. 7317// @returns The new node, or SDValue() on failure. 
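// For instance (with illustrative i32 constants):
//   (add (select cc, 0, 4), x)
// has the identity constant 0 in the true position, so it becomes
//   (select cc, x, (add x, 4))
// which the backend can later turn into a predicated add.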
7318static 7319SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 7320 TargetLowering::DAGCombinerInfo &DCI, 7321 bool AllOnes = false) { 7322 SelectionDAG &DAG = DCI.DAG; 7323 EVT VT = N->getValueType(0); 7324 SDValue NonConstantVal; 7325 SDValue CCOp; 7326 bool SwapSelectOps; 7327 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 7328 NonConstantVal, DAG)) 7329 return SDValue(); 7330 7331 // Slct is now know to be the desired identity constant when CC is true. 7332 SDValue TrueVal = OtherOp; 7333 SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, 7334 OtherOp, NonConstantVal); 7335 // Unless SwapSelectOps says CC should be false. 7336 if (SwapSelectOps) 7337 std::swap(TrueVal, FalseVal); 7338 7339 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 7340 CCOp, TrueVal, FalseVal); 7341} 7342 7343// Attempt combineSelectAndUse on each operand of a commutative operator N. 7344static 7345SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, 7346 TargetLowering::DAGCombinerInfo &DCI) { 7347 SDValue N0 = N->getOperand(0); 7348 SDValue N1 = N->getOperand(1); 7349 if (N0.getNode()->hasOneUse()) { 7350 SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes); 7351 if (Result.getNode()) 7352 return Result; 7353 } 7354 if (N1.getNode()->hasOneUse()) { 7355 SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes); 7356 if (Result.getNode()) 7357 return Result; 7358 } 7359 return SDValue(); 7360} 7361 7362// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 7363// (only after legalization). 7364static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 7365 TargetLowering::DAGCombinerInfo &DCI, 7366 const ARMSubtarget *Subtarget) { 7367 7368 // Only perform optimization if after legalize, and if NEON is available. We 7369 // also expected both operands to be BUILD_VECTORs. 7370 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 7371 || N0.getOpcode() != ISD::BUILD_VECTOR 7372 || N1.getOpcode() != ISD::BUILD_VECTOR) 7373 return SDValue(); 7374 7375 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 7376 EVT VT = N->getValueType(0); 7377 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 7378 return SDValue(); 7379 7380 // Check that the vector operands are of the right form. 7381 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 7382 // operands, where N is the size of the formed vector. 7383 // Each EXTRACT_VECTOR should have the same input vector and odd or even 7384 // index such that we have a pair wise add pattern. 7385 7386 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 7387 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7388 return SDValue(); 7389 SDValue Vec = N0->getOperand(0)->getOperand(0); 7390 SDNode *V = Vec.getNode(); 7391 unsigned nextIndex = 0; 7392 7393 // For each operands to the ADD which are BUILD_VECTORs, 7394 // check to see if each of their operands are an EXTRACT_VECTOR with 7395 // the same vector and appropriate index. 7396 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 7397 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 7398 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7399 7400 SDValue ExtVec0 = N0->getOperand(i); 7401 SDValue ExtVec1 = N1->getOperand(i); 7402 7403 // First operand is the vector, verify its the same. 
7404 if (V != ExtVec0->getOperand(0).getNode() ||
7405 V != ExtVec1->getOperand(0).getNode())
7406 return SDValue();
7407
7408 // Second is the constant, verify it's correct.
7409 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
7410 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
7411
7412 // For the constants, we expect the even index in N0 and the odd index in N1.
7413 if (!C0 || !C1 || C0->getZExtValue() != nextIndex
7414 || C1->getZExtValue() != nextIndex+1)
7415 return SDValue();
7416
7417 // Increment index.
7418 nextIndex+=2;
7419 } else
7420 return SDValue();
7421 }
7422
7423 // Create VPADDL node.
7424 SelectionDAG &DAG = DCI.DAG;
7425 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7426
7427 // Build operand list.
7428 SmallVector<SDValue, 8> Ops;
7429 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
7430 TLI.getPointerTy()));
7431
7432 // Input is the vector.
7433 Ops.push_back(Vec);
7434
7435 // Get widened type and narrowed type.
7436 MVT widenType;
7437 unsigned numElem = VT.getVectorNumElements();
7438 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
7439 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
7440 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
7441 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
7442 default:
7443 llvm_unreachable("Invalid vector element type for padd optimization.");
7444 }
7445
7446 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
7447 widenType, &Ops[0], Ops.size());
7448 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
7449}
7450
7451static SDValue findMUL_LOHI(SDValue V) {
7452 if (V->getOpcode() == ISD::UMUL_LOHI ||
7453 V->getOpcode() == ISD::SMUL_LOHI)
7454 return V;
7455 return SDValue();
7456}
7457
7458static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
7459 TargetLowering::DAGCombinerInfo &DCI,
7460 const ARMSubtarget *Subtarget) {
7461
7462 if (Subtarget->isThumb1Only()) return SDValue();
7463
7464 // Only perform the checks after legalize when the pattern is available.
7465 if (DCI.isBeforeLegalize()) return SDValue();
7466
7467 // Look for multiply add opportunities.
7468 // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
7469 // each add node consumes a value from ISD::UMUL_LOHI and there is
7470 // a glue link from the first add to the second add.
7471 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE
7472 // with an S/UMLAL instruction.
7473 // loAdd UMUL_LOHI
7474 // \ / :lo \ :hi
7475 // \ / \ [no multiline comment]
7476 // ADDC | hiAdd
7477 // \ :glue / /
7478 // \ / /
7479 // ADDE
7480 //
7481 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
7482 SDValue AddcOp0 = AddcNode->getOperand(0);
7483 SDValue AddcOp1 = AddcNode->getOperand(1);
7484
7485 // Check if the two operands are from the same mul_lohi node.
7486 if (AddcOp0.getNode() == AddcOp1.getNode())
7487 return SDValue();
7488
7489 assert(AddcNode->getNumValues() == 2 &&
7490 AddcNode->getValueType(0) == MVT::i32 &&
7491 AddcNode->getValueType(1) == MVT::Glue &&
7492 "Expect ADDC with two result values: i32, glue");
7493
7494 // Check that the ADDC adds the low result of the S/UMUL_LOHI.
7495 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
7496 AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
7497 AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
7498 AddcOp1->getOpcode() != ISD::SMUL_LOHI)
7499 return SDValue();
7500
7501 // Look for the glued ADDE.
7502 SDNode* AddeNode = AddcNode->getGluedUser(); 7503 if (AddeNode == NULL) 7504 return SDValue(); 7505 7506 // Make sure it is really an ADDE. 7507 if (AddeNode->getOpcode() != ISD::ADDE) 7508 return SDValue(); 7509 7510 assert(AddeNode->getNumOperands() == 3 && 7511 AddeNode->getOperand(2).getValueType() == MVT::Glue && 7512 "ADDE node has the wrong inputs"); 7513 7514 // Check for the triangle shape. 7515 SDValue AddeOp0 = AddeNode->getOperand(0); 7516 SDValue AddeOp1 = AddeNode->getOperand(1); 7517 7518 // Make sure that the ADDE operands are not coming from the same node. 7519 if (AddeOp0.getNode() == AddeOp1.getNode()) 7520 return SDValue(); 7521 7522 // Find the MUL_LOHI node walking up ADDE's operands. 7523 bool IsLeftOperandMUL = false; 7524 SDValue MULOp = findMUL_LOHI(AddeOp0); 7525 if (MULOp == SDValue()) 7526 MULOp = findMUL_LOHI(AddeOp1); 7527 else 7528 IsLeftOperandMUL = true; 7529 if (MULOp == SDValue()) 7530 return SDValue(); 7531 7532 // Figure out the right opcode. 7533 unsigned Opc = MULOp->getOpcode(); 7534 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 7535 7536 // Figure out the high and low input values to the MLAL node. 7537 SDValue* HiMul = &MULOp; 7538 SDValue* HiAdd = NULL; 7539 SDValue* LoMul = NULL; 7540 SDValue* LowAdd = NULL; 7541 7542 if (IsLeftOperandMUL) 7543 HiAdd = &AddeOp1; 7544 else 7545 HiAdd = &AddeOp0; 7546 7547 7548 if (AddcOp0->getOpcode() == Opc) { 7549 LoMul = &AddcOp0; 7550 LowAdd = &AddcOp1; 7551 } 7552 if (AddcOp1->getOpcode() == Opc) { 7553 LoMul = &AddcOp1; 7554 LowAdd = &AddcOp0; 7555 } 7556 7557 if (LoMul == NULL) 7558 return SDValue(); 7559 7560 if (LoMul->getNode() != HiMul->getNode()) 7561 return SDValue(); 7562 7563 // Create the merged node. 7564 SelectionDAG &DAG = DCI.DAG; 7565 7566 // Build operand list. 7567 SmallVector<SDValue, 8> Ops; 7568 Ops.push_back(LoMul->getOperand(0)); 7569 Ops.push_back(LoMul->getOperand(1)); 7570 Ops.push_back(*LowAdd); 7571 Ops.push_back(*HiAdd); 7572 7573 SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(), 7574 DAG.getVTList(MVT::i32, MVT::i32), 7575 &Ops[0], Ops.size()); 7576 7577 // Replace the ADDs' nodes uses by the MLA node's values. 7578 SDValue HiMLALResult(MLALNode.getNode(), 1); 7579 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 7580 7581 SDValue LoMLALResult(MLALNode.getNode(), 0); 7582 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 7583 7584 // Return original node to notify the driver to stop replacing. 7585 SDValue resNode(AddcNode, 0); 7586 return resNode; 7587} 7588 7589/// PerformADDCCombine - Target-specific dag combine transform from 7590/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL. 7591static SDValue PerformADDCCombine(SDNode *N, 7592 TargetLowering::DAGCombinerInfo &DCI, 7593 const ARMSubtarget *Subtarget) { 7594 7595 return AddCombineTo64bitMLAL(N, DCI, Subtarget); 7596 7597} 7598 7599/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 7600/// operands N0 and N1. This is a helper for PerformADDCombine that is 7601/// called with the default operands, and if that fails, with commuted 7602/// operands. 7603static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 7604 TargetLowering::DAGCombinerInfo &DCI, 7605 const ARMSubtarget *Subtarget){ 7606 7607 // Attempt to create vpaddl for this add. 
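// (e.g., when the operands form such an even/odd extract pattern over a single
//  vector, the whole add can be replaced by one widening pairwise add such as
//  vpaddl.s16; illustrative only.)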
7608 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 7609 if (Result.getNode()) 7610 return Result; 7611 7612 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7613 if (N0.getNode()->hasOneUse()) { 7614 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 7615 if (Result.getNode()) return Result; 7616 } 7617 return SDValue(); 7618} 7619 7620/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 7621/// 7622static SDValue PerformADDCombine(SDNode *N, 7623 TargetLowering::DAGCombinerInfo &DCI, 7624 const ARMSubtarget *Subtarget) { 7625 SDValue N0 = N->getOperand(0); 7626 SDValue N1 = N->getOperand(1); 7627 7628 // First try with the default operand order. 7629 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 7630 if (Result.getNode()) 7631 return Result; 7632 7633 // If that didn't work, try again with the operands commuted. 7634 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 7635} 7636 7637/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 7638/// 7639static SDValue PerformSUBCombine(SDNode *N, 7640 TargetLowering::DAGCombinerInfo &DCI) { 7641 SDValue N0 = N->getOperand(0); 7642 SDValue N1 = N->getOperand(1); 7643 7644 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 7645 if (N1.getNode()->hasOneUse()) { 7646 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 7647 if (Result.getNode()) return Result; 7648 } 7649 7650 return SDValue(); 7651} 7652 7653/// PerformVMULCombine 7654/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 7655/// special multiplier accumulator forwarding. 7656/// vmul d3, d0, d2 7657/// vmla d3, d1, d2 7658/// is faster than 7659/// vadd d3, d0, d1 7660/// vmul d3, d3, d2 7661static SDValue PerformVMULCombine(SDNode *N, 7662 TargetLowering::DAGCombinerInfo &DCI, 7663 const ARMSubtarget *Subtarget) { 7664 if (!Subtarget->hasVMLxForwarding()) 7665 return SDValue(); 7666 7667 SelectionDAG &DAG = DCI.DAG; 7668 SDValue N0 = N->getOperand(0); 7669 SDValue N1 = N->getOperand(1); 7670 unsigned Opcode = N0.getOpcode(); 7671 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 7672 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 7673 Opcode = N1.getOpcode(); 7674 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 7675 Opcode != ISD::FADD && Opcode != ISD::FSUB) 7676 return SDValue(); 7677 std::swap(N0, N1); 7678 } 7679 7680 EVT VT = N->getValueType(0); 7681 DebugLoc DL = N->getDebugLoc(); 7682 SDValue N00 = N0->getOperand(0); 7683 SDValue N01 = N0->getOperand(1); 7684 return DAG.getNode(Opcode, DL, VT, 7685 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 7686 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 7687} 7688 7689static SDValue PerformMULCombine(SDNode *N, 7690 TargetLowering::DAGCombinerInfo &DCI, 7691 const ARMSubtarget *Subtarget) { 7692 SelectionDAG &DAG = DCI.DAG; 7693 7694 if (Subtarget->isThumb1Only()) 7695 return SDValue(); 7696 7697 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 7698 return SDValue(); 7699 7700 EVT VT = N->getValueType(0); 7701 if (VT.is64BitVector() || VT.is128BitVector()) 7702 return PerformVMULCombine(N, DCI, Subtarget); 7703 if (VT != MVT::i32) 7704 return SDValue(); 7705 7706 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 7707 if (!C) 7708 return SDValue(); 7709 7710 int64_t MulAmt = C->getSExtValue(); 7711 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 7712 7713 ShiftAmt = ShiftAmt & (32 - 1); 7714 SDValue V = N->getOperand(0); 7715 DebugLoc DL = N->getDebugLoc(); 7716 7717 
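// Illustrative walk-through with a hypothetical multiply by 10: ShiftAmt is 1,
// MulAmt becomes 5 = 2^2 + 1, so we build (add (shl x, 2), x) and then shift
// the result left by 1, i.e. x*10 == ((x << 2) + x) << 1.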
SDValue Res; 7718 MulAmt >>= ShiftAmt; 7719 7720 if (MulAmt >= 0) { 7721 if (isPowerOf2_32(MulAmt - 1)) { 7722 // (mul x, 2^N + 1) => (add (shl x, N), x) 7723 Res = DAG.getNode(ISD::ADD, DL, VT, 7724 V, 7725 DAG.getNode(ISD::SHL, DL, VT, 7726 V, 7727 DAG.getConstant(Log2_32(MulAmt - 1), 7728 MVT::i32))); 7729 } else if (isPowerOf2_32(MulAmt + 1)) { 7730 // (mul x, 2^N - 1) => (sub (shl x, N), x) 7731 Res = DAG.getNode(ISD::SUB, DL, VT, 7732 DAG.getNode(ISD::SHL, DL, VT, 7733 V, 7734 DAG.getConstant(Log2_32(MulAmt + 1), 7735 MVT::i32)), 7736 V); 7737 } else 7738 return SDValue(); 7739 } else { 7740 uint64_t MulAmtAbs = -MulAmt; 7741 if (isPowerOf2_32(MulAmtAbs + 1)) { 7742 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 7743 Res = DAG.getNode(ISD::SUB, DL, VT, 7744 V, 7745 DAG.getNode(ISD::SHL, DL, VT, 7746 V, 7747 DAG.getConstant(Log2_32(MulAmtAbs + 1), 7748 MVT::i32))); 7749 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 7750 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 7751 Res = DAG.getNode(ISD::ADD, DL, VT, 7752 V, 7753 DAG.getNode(ISD::SHL, DL, VT, 7754 V, 7755 DAG.getConstant(Log2_32(MulAmtAbs-1), 7756 MVT::i32))); 7757 Res = DAG.getNode(ISD::SUB, DL, VT, 7758 DAG.getConstant(0, MVT::i32),Res); 7759 7760 } else 7761 return SDValue(); 7762 } 7763 7764 if (ShiftAmt != 0) 7765 Res = DAG.getNode(ISD::SHL, DL, VT, 7766 Res, DAG.getConstant(ShiftAmt, MVT::i32)); 7767 7768 // Do not add new nodes to DAG combiner worklist. 7769 DCI.CombineTo(N, Res, false); 7770 return SDValue(); 7771} 7772 7773static SDValue PerformANDCombine(SDNode *N, 7774 TargetLowering::DAGCombinerInfo &DCI, 7775 const ARMSubtarget *Subtarget) { 7776 7777 // Attempt to use immediate-form VBIC 7778 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 7779 DebugLoc dl = N->getDebugLoc(); 7780 EVT VT = N->getValueType(0); 7781 SelectionDAG &DAG = DCI.DAG; 7782 7783 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 7784 return SDValue(); 7785 7786 APInt SplatBits, SplatUndef; 7787 unsigned SplatBitSize; 7788 bool HasAnyUndefs; 7789 if (BVN && 7790 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 7791 if (SplatBitSize <= 64) { 7792 EVT VbicVT; 7793 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 7794 SplatUndef.getZExtValue(), SplatBitSize, 7795 DAG, VbicVT, VT.is128BitVector(), 7796 OtherModImm); 7797 if (Val.getNode()) { 7798 SDValue Input = 7799 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 7800 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 7801 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 7802 } 7803 } 7804 } 7805 7806 if (!Subtarget->isThumb1Only()) { 7807 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 7808 SDValue Result = combineSelectAndUseCommutative(N, true, DCI); 7809 if (Result.getNode()) 7810 return Result; 7811 } 7812 7813 return SDValue(); 7814} 7815 7816/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 7817static SDValue PerformORCombine(SDNode *N, 7818 TargetLowering::DAGCombinerInfo &DCI, 7819 const ARMSubtarget *Subtarget) { 7820 // Attempt to use immediate-form VORR 7821 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 7822 DebugLoc dl = N->getDebugLoc(); 7823 EVT VT = N->getValueType(0); 7824 SelectionDAG &DAG = DCI.DAG; 7825 7826 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 7827 return SDValue(); 7828 7829 APInt SplatBits, SplatUndef; 7830 unsigned SplatBitSize; 7831 bool HasAnyUndefs; 7832 if (BVN && Subtarget->hasNEON() && 7833 
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 7834 if (SplatBitSize <= 64) { 7835 EVT VorrVT; 7836 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 7837 SplatUndef.getZExtValue(), SplatBitSize, 7838 DAG, VorrVT, VT.is128BitVector(), 7839 OtherModImm); 7840 if (Val.getNode()) { 7841 SDValue Input = 7842 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 7843 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 7844 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 7845 } 7846 } 7847 } 7848 7849 if (!Subtarget->isThumb1Only()) { 7850 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 7851 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 7852 if (Result.getNode()) 7853 return Result; 7854 } 7855 7856 // The code below optimizes (or (and X, Y), Z). 7857 // The AND operand needs to have a single user to make these optimizations 7858 // profitable. 7859 SDValue N0 = N->getOperand(0); 7860 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 7861 return SDValue(); 7862 SDValue N1 = N->getOperand(1); 7863 7864 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 7865 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 7866 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 7867 APInt SplatUndef; 7868 unsigned SplatBitSize; 7869 bool HasAnyUndefs; 7870 7871 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 7872 APInt SplatBits0; 7873 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 7874 HasAnyUndefs) && !HasAnyUndefs) { 7875 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 7876 APInt SplatBits1; 7877 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 7878 HasAnyUndefs) && !HasAnyUndefs && 7879 SplatBits0 == ~SplatBits1) { 7880 // Canonicalize the vector type to make instruction selection simpler. 7881 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 7882 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 7883 N0->getOperand(1), N0->getOperand(0), 7884 N1->getOperand(0)); 7885 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 7886 } 7887 } 7888 } 7889 7890 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 7891 // reasonable. 7892 7893 // BFI is only available on V6T2+ 7894 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 7895 return SDValue(); 7896 7897 DebugLoc DL = N->getDebugLoc(); 7898 // 1) or (and A, mask), val => ARMbfi A, val, mask 7899 // iff (val & mask) == val 7900 // 7901 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 7902 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 7903 // && mask == ~mask2 7904 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 7905 // && ~mask == mask2 7906 // (i.e., copy a bitfield value into another bitfield of the same width) 7907 7908 if (VT != MVT::i32) 7909 return SDValue(); 7910 7911 SDValue N00 = N0.getOperand(0); 7912 7913 // The value and the mask need to be constants so we can verify this is 7914 // actually a bitfield set. If the mask is 0xffff, we can do better 7915 // via a movt instruction, so don't use BFI in that case. 
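// Illustrative instance of case (1), with hypothetical constants: for
// (or (and A, 0xffff00ff), 0x00005600), ~mask is the contiguous field
// 0x0000ff00, so this becomes (ARMbfi A, 0x56, 0xffff00ff), i.e. 0x56 is
// inserted into bits [15:8] of A.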
7916 SDValue MaskOp = N0.getOperand(1); 7917 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 7918 if (!MaskC) 7919 return SDValue(); 7920 unsigned Mask = MaskC->getZExtValue(); 7921 if (Mask == 0xffff) 7922 return SDValue(); 7923 SDValue Res; 7924 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 7925 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 7926 if (N1C) { 7927 unsigned Val = N1C->getZExtValue(); 7928 if ((Val & ~Mask) != Val) 7929 return SDValue(); 7930 7931 if (ARM::isBitFieldInvertedMask(Mask)) { 7932 Val >>= CountTrailingZeros_32(~Mask); 7933 7934 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 7935 DAG.getConstant(Val, MVT::i32), 7936 DAG.getConstant(Mask, MVT::i32)); 7937 7938 // Do not add new nodes to DAG combiner worklist. 7939 DCI.CombineTo(N, Res, false); 7940 return SDValue(); 7941 } 7942 } else if (N1.getOpcode() == ISD::AND) { 7943 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 7944 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7945 if (!N11C) 7946 return SDValue(); 7947 unsigned Mask2 = N11C->getZExtValue(); 7948 7949 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 7950 // as is to match. 7951 if (ARM::isBitFieldInvertedMask(Mask) && 7952 (Mask == ~Mask2)) { 7953 // The pack halfword instruction works better for masks that fit it, 7954 // so use that when it's available. 7955 if (Subtarget->hasT2ExtractPack() && 7956 (Mask == 0xffff || Mask == 0xffff0000)) 7957 return SDValue(); 7958 // 2a 7959 unsigned amt = CountTrailingZeros_32(Mask2); 7960 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 7961 DAG.getConstant(amt, MVT::i32)); 7962 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 7963 DAG.getConstant(Mask, MVT::i32)); 7964 // Do not add new nodes to DAG combiner worklist. 7965 DCI.CombineTo(N, Res, false); 7966 return SDValue(); 7967 } else if (ARM::isBitFieldInvertedMask(~Mask) && 7968 (~Mask == Mask2)) { 7969 // The pack halfword instruction works better for masks that fit it, 7970 // so use that when it's available. 7971 if (Subtarget->hasT2ExtractPack() && 7972 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 7973 return SDValue(); 7974 // 2b 7975 unsigned lsb = CountTrailingZeros_32(Mask); 7976 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 7977 DAG.getConstant(lsb, MVT::i32)); 7978 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 7979 DAG.getConstant(Mask2, MVT::i32)); 7980 // Do not add new nodes to DAG combiner worklist. 7981 DCI.CombineTo(N, Res, false); 7982 return SDValue(); 7983 } 7984 } 7985 7986 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 7987 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 7988 ARM::isBitFieldInvertedMask(~Mask)) { 7989 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 7990 // where lsb(mask) == #shamt and masked bits of B are known zero. 7991 SDValue ShAmt = N00.getOperand(1); 7992 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7993 unsigned LSB = CountTrailingZeros_32(Mask); 7994 if (ShAmtC != LSB) 7995 return SDValue(); 7996 7997 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 7998 DAG.getConstant(~Mask, MVT::i32)); 7999 8000 // Do not add new nodes to DAG combiner worklist. 
8001 DCI.CombineTo(N, Res, false); 8002 } 8003 8004 return SDValue(); 8005} 8006 8007static SDValue PerformXORCombine(SDNode *N, 8008 TargetLowering::DAGCombinerInfo &DCI, 8009 const ARMSubtarget *Subtarget) { 8010 EVT VT = N->getValueType(0); 8011 SelectionDAG &DAG = DCI.DAG; 8012 8013 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 8014 return SDValue(); 8015 8016 if (!Subtarget->isThumb1Only()) { 8017 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 8018 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 8019 if (Result.getNode()) 8020 return Result; 8021 } 8022 8023 return SDValue(); 8024} 8025 8026/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 8027/// the bits being cleared by the AND are not demanded by the BFI. 8028static SDValue PerformBFICombine(SDNode *N, 8029 TargetLowering::DAGCombinerInfo &DCI) { 8030 SDValue N1 = N->getOperand(1); 8031 if (N1.getOpcode() == ISD::AND) { 8032 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 8033 if (!N11C) 8034 return SDValue(); 8035 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 8036 unsigned LSB = CountTrailingZeros_32(~InvMask); 8037 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 8038 unsigned Mask = (1 << Width)-1; 8039 unsigned Mask2 = N11C->getZExtValue(); 8040 if ((Mask & (~Mask2)) == 0) 8041 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 8042 N->getOperand(0), N1.getOperand(0), 8043 N->getOperand(2)); 8044 } 8045 return SDValue(); 8046} 8047 8048/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 8049/// ARMISD::VMOVRRD. 8050static SDValue PerformVMOVRRDCombine(SDNode *N, 8051 TargetLowering::DAGCombinerInfo &DCI) { 8052 // vmovrrd(vmovdrr x, y) -> x,y 8053 SDValue InDouble = N->getOperand(0); 8054 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 8055 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 8056 8057 // vmovrrd(load f64) -> (load i32), (load i32) 8058 SDNode *InNode = InDouble.getNode(); 8059 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 8060 InNode->getValueType(0) == MVT::f64 && 8061 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 8062 !cast<LoadSDNode>(InNode)->isVolatile()) { 8063 // TODO: Should this be done for non-FrameIndex operands? 8064 LoadSDNode *LD = cast<LoadSDNode>(InNode); 8065 8066 SelectionDAG &DAG = DCI.DAG; 8067 DebugLoc DL = LD->getDebugLoc(); 8068 SDValue BasePtr = LD->getBasePtr(); 8069 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 8070 LD->getPointerInfo(), LD->isVolatile(), 8071 LD->isNonTemporal(), LD->isInvariant(), 8072 LD->getAlignment()); 8073 8074 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 8075 DAG.getConstant(4, MVT::i32)); 8076 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 8077 LD->getPointerInfo(), LD->isVolatile(), 8078 LD->isNonTemporal(), LD->isInvariant(), 8079 std::min(4U, LD->getAlignment() / 2)); 8080 8081 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 8082 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 8083 DCI.RemoveFromWorklist(LD); 8084 DAG.DeleteNode(LD); 8085 return Result; 8086 } 8087 8088 return SDValue(); 8089} 8090 8091/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 8092/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 
8093static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 8094 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 8095 SDValue Op0 = N->getOperand(0); 8096 SDValue Op1 = N->getOperand(1); 8097 if (Op0.getOpcode() == ISD::BITCAST) 8098 Op0 = Op0.getOperand(0); 8099 if (Op1.getOpcode() == ISD::BITCAST) 8100 Op1 = Op1.getOperand(0); 8101 if (Op0.getOpcode() == ARMISD::VMOVRRD && 8102 Op0.getNode() == Op1.getNode() && 8103 Op0.getResNo() == 0 && Op1.getResNo() == 1) 8104 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 8105 N->getValueType(0), Op0.getOperand(0)); 8106 return SDValue(); 8107} 8108 8109/// PerformSTORECombine - Target-specific dag combine xforms for 8110/// ISD::STORE. 8111static SDValue PerformSTORECombine(SDNode *N, 8112 TargetLowering::DAGCombinerInfo &DCI) { 8113 StoreSDNode *St = cast<StoreSDNode>(N); 8114 if (St->isVolatile()) 8115 return SDValue(); 8116 8117 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 8118 // pack all of the elements in one place. Next, store to memory in fewer 8119 // chunks. 8120 SDValue StVal = St->getValue(); 8121 EVT VT = StVal.getValueType(); 8122 if (St->isTruncatingStore() && VT.isVector()) { 8123 SelectionDAG &DAG = DCI.DAG; 8124 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8125 EVT StVT = St->getMemoryVT(); 8126 unsigned NumElems = VT.getVectorNumElements(); 8127 assert(StVT != VT && "Cannot truncate to the same type"); 8128 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits(); 8129 unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits(); 8130 8131 // From, To sizes and ElemCount must be pow of two 8132 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 8133 8134 // We are going to use the original vector elt for storing. 8135 // Accumulated smaller vector elements must be a multiple of the store size. 8136 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 8137 8138 unsigned SizeRatio = FromEltSz / ToEltSz; 8139 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 8140 8141 // Create a type on which we perform the shuffle. 8142 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 8143 NumElems*SizeRatio); 8144 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 8145 8146 DebugLoc DL = St->getDebugLoc(); 8147 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 8148 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 8149 for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio; 8150 8151 // Can't shuffle using an illegal type. 8152 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 8153 8154 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 8155 DAG.getUNDEF(WideVec.getValueType()), 8156 ShuffleVec.data()); 8157 // At this point all of the data is stored at the bottom of the 8158 // register. We now need to save it to mem. 8159 8160 // Find the largest store unit 8161 MVT StoreType = MVT::i8; 8162 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 8163 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 8164 MVT Tp = (MVT::SimpleValueType)tp; 8165 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 8166 StoreType = Tp; 8167 } 8168 // Didn't find a legal store type. 
8169 if (!TLI.isTypeLegal(StoreType)) 8170 return SDValue(); 8171 8172 // Bitcast the original vector into a vector of store-size units 8173 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 8174 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 8175 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 8176 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 8177 SmallVector<SDValue, 8> Chains; 8178 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 8179 TLI.getPointerTy()); 8180 SDValue BasePtr = St->getBasePtr(); 8181 8182 // Perform one or more big stores into memory. 8183 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 8184 for (unsigned I = 0; I < E; I++) { 8185 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 8186 StoreType, ShuffWide, 8187 DAG.getIntPtrConstant(I)); 8188 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 8189 St->getPointerInfo(), St->isVolatile(), 8190 St->isNonTemporal(), St->getAlignment()); 8191 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 8192 Increment); 8193 Chains.push_back(Ch); 8194 } 8195 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0], 8196 Chains.size()); 8197 } 8198 8199 if (!ISD::isNormalStore(St)) 8200 return SDValue(); 8201 8202 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 8203 // ARM stores of arguments in the same cache line. 8204 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 8205 StVal.getNode()->hasOneUse()) { 8206 SelectionDAG &DAG = DCI.DAG; 8207 DebugLoc DL = St->getDebugLoc(); 8208 SDValue BasePtr = St->getBasePtr(); 8209 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 8210 StVal.getNode()->getOperand(0), BasePtr, 8211 St->getPointerInfo(), St->isVolatile(), 8212 St->isNonTemporal(), St->getAlignment()); 8213 8214 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 8215 DAG.getConstant(4, MVT::i32)); 8216 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 8217 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 8218 St->isNonTemporal(), 8219 std::min(4U, St->getAlignment() / 2)); 8220 } 8221 8222 if (StVal.getValueType() != MVT::i64 || 8223 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8224 return SDValue(); 8225 8226 // Bitcast an i64 store extracted from a vector to f64. 8227 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8228 SelectionDAG &DAG = DCI.DAG; 8229 DebugLoc dl = StVal.getDebugLoc(); 8230 SDValue IntVec = StVal.getOperand(0); 8231 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8232 IntVec.getValueType().getVectorNumElements()); 8233 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 8234 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8235 Vec, StVal.getOperand(1)); 8236 dl = N->getDebugLoc(); 8237 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 8238 // Make the DAGCombiner fold the bitcasts. 8239 DCI.AddToWorklist(Vec.getNode()); 8240 DCI.AddToWorklist(ExtElt.getNode()); 8241 DCI.AddToWorklist(V.getNode()); 8242 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 8243 St->getPointerInfo(), St->isVolatile(), 8244 St->isNonTemporal(), St->getAlignment(), 8245 St->getTBAAInfo()); 8246} 8247 8248/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 8249/// are normal, non-volatile loads. 
If so, it is profitable to bitcast an 8250/// i64 vector to have f64 elements, since the value can then be loaded 8251/// directly into a VFP register. 8252static bool hasNormalLoadOperand(SDNode *N) { 8253 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 8254 for (unsigned i = 0; i < NumElts; ++i) { 8255 SDNode *Elt = N->getOperand(i).getNode(); 8256 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 8257 return true; 8258 } 8259 return false; 8260} 8261 8262/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 8263/// ISD::BUILD_VECTOR. 8264static SDValue PerformBUILD_VECTORCombine(SDNode *N, 8265 TargetLowering::DAGCombinerInfo &DCI){ 8266 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 8267 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 8268 // into a pair of GPRs, which is fine when the value is used as a scalar, 8269 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 8270 SelectionDAG &DAG = DCI.DAG; 8271 if (N->getNumOperands() == 2) { 8272 SDValue RV = PerformVMOVDRRCombine(N, DAG); 8273 if (RV.getNode()) 8274 return RV; 8275 } 8276 8277 // Load i64 elements as f64 values so that type legalization does not split 8278 // them up into i32 values. 8279 EVT VT = N->getValueType(0); 8280 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 8281 return SDValue(); 8282 DebugLoc dl = N->getDebugLoc(); 8283 SmallVector<SDValue, 8> Ops; 8284 unsigned NumElts = VT.getVectorNumElements(); 8285 for (unsigned i = 0; i < NumElts; ++i) { 8286 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 8287 Ops.push_back(V); 8288 // Make the DAGCombiner fold the bitcast. 8289 DCI.AddToWorklist(V.getNode()); 8290 } 8291 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 8292 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 8293 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 8294} 8295 8296/// PerformInsertEltCombine - Target-specific dag combine xforms for 8297/// ISD::INSERT_VECTOR_ELT. 8298static SDValue PerformInsertEltCombine(SDNode *N, 8299 TargetLowering::DAGCombinerInfo &DCI) { 8300 // Bitcast an i64 load inserted into a vector to f64. 8301 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8302 EVT VT = N->getValueType(0); 8303 SDNode *Elt = N->getOperand(1).getNode(); 8304 if (VT.getVectorElementType() != MVT::i64 || 8305 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 8306 return SDValue(); 8307 8308 SelectionDAG &DAG = DCI.DAG; 8309 DebugLoc dl = N->getDebugLoc(); 8310 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8311 VT.getVectorNumElements()); 8312 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 8313 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 8314 // Make the DAGCombiner fold the bitcasts. 8315 DCI.AddToWorklist(Vec.getNode()); 8316 DCI.AddToWorklist(V.getNode()); 8317 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 8318 Vec, V, N->getOperand(2)); 8319 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 8320} 8321 8322/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 8323/// ISD::VECTOR_SHUFFLE. 
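// A minimal illustration, assuming a v8i16 shuffle built from two v4i16 halves:
// a mask element referring to lane 9 (lane 1 of the second concat) is remapped
// to lane 5 of the merged concat(v1, v2) operand, lanes below HalfElts are kept
// as-is, and anything else becomes undef (-1).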
8324static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 8325 // The LLVM shufflevector instruction does not require the shuffle mask 8326 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 8327 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 8328 // operands do not match the mask length, they are extended by concatenating 8329 // them with undef vectors. That is probably the right thing for other 8330 // targets, but for NEON it is better to concatenate two double-register 8331 // size vector operands into a single quad-register size vector. Do that 8332 // transformation here: 8333 // shuffle(concat(v1, undef), concat(v2, undef)) -> 8334 // shuffle(concat(v1, v2), undef) 8335 SDValue Op0 = N->getOperand(0); 8336 SDValue Op1 = N->getOperand(1); 8337 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 8338 Op1.getOpcode() != ISD::CONCAT_VECTORS || 8339 Op0.getNumOperands() != 2 || 8340 Op1.getNumOperands() != 2) 8341 return SDValue(); 8342 SDValue Concat0Op1 = Op0.getOperand(1); 8343 SDValue Concat1Op1 = Op1.getOperand(1); 8344 if (Concat0Op1.getOpcode() != ISD::UNDEF || 8345 Concat1Op1.getOpcode() != ISD::UNDEF) 8346 return SDValue(); 8347 // Skip the transformation if any of the types are illegal. 8348 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8349 EVT VT = N->getValueType(0); 8350 if (!TLI.isTypeLegal(VT) || 8351 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 8352 !TLI.isTypeLegal(Concat1Op1.getValueType())) 8353 return SDValue(); 8354 8355 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 8356 Op0.getOperand(0), Op1.getOperand(0)); 8357 // Translate the shuffle mask. 8358 SmallVector<int, 16> NewMask; 8359 unsigned NumElts = VT.getVectorNumElements(); 8360 unsigned HalfElts = NumElts/2; 8361 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 8362 for (unsigned n = 0; n < NumElts; ++n) { 8363 int MaskElt = SVN->getMaskElt(n); 8364 int NewElt = -1; 8365 if (MaskElt < (int)HalfElts) 8366 NewElt = MaskElt; 8367 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 8368 NewElt = HalfElts + MaskElt - NumElts; 8369 NewMask.push_back(NewElt); 8370 } 8371 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 8372 DAG.getUNDEF(VT), NewMask.data()); 8373} 8374 8375/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 8376/// NEON load/store intrinsics to merge base address updates. 8377static SDValue CombineBaseUpdate(SDNode *N, 8378 TargetLowering::DAGCombinerInfo &DCI) { 8379 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 8380 return SDValue(); 8381 8382 SelectionDAG &DAG = DCI.DAG; 8383 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 8384 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 8385 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 8386 SDValue Addr = N->getOperand(AddrOpIdx); 8387 8388 // Search for a use of the address operand that is an increment. 8389 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 8390 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 8391 SDNode *User = *UI; 8392 if (User->getOpcode() != ISD::ADD || 8393 UI.getUse().getResNo() != Addr.getResNo()) 8394 continue; 8395 8396 // Check that the add is independent of the load/store. Otherwise, folding 8397 // it would create a cycle. 8398 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 8399 continue; 8400 8401 // Find the new opcode for the updating load/store. 
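// (Illustrative goal, assuming a 128-bit vld1 followed by 'add r0, r0, #16':
//  the pair can be folded into a single post-incrementing 'vld1 ... [r0]!' by
//  switching to the corresponding _UPD opcode chosen below.)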
8402 bool isLoad = true; 8403 bool isLaneOp = false; 8404 unsigned NewOpc = 0; 8405 unsigned NumVecs = 0; 8406 if (isIntrinsic) { 8407 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 8408 switch (IntNo) { 8409 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 8410 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 8411 NumVecs = 1; break; 8412 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 8413 NumVecs = 2; break; 8414 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 8415 NumVecs = 3; break; 8416 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 8417 NumVecs = 4; break; 8418 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 8419 NumVecs = 2; isLaneOp = true; break; 8420 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 8421 NumVecs = 3; isLaneOp = true; break; 8422 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 8423 NumVecs = 4; isLaneOp = true; break; 8424 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 8425 NumVecs = 1; isLoad = false; break; 8426 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 8427 NumVecs = 2; isLoad = false; break; 8428 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 8429 NumVecs = 3; isLoad = false; break; 8430 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 8431 NumVecs = 4; isLoad = false; break; 8432 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 8433 NumVecs = 2; isLoad = false; isLaneOp = true; break; 8434 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 8435 NumVecs = 3; isLoad = false; isLaneOp = true; break; 8436 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 8437 NumVecs = 4; isLoad = false; isLaneOp = true; break; 8438 } 8439 } else { 8440 isLaneOp = true; 8441 switch (N->getOpcode()) { 8442 default: llvm_unreachable("unexpected opcode for Neon base update"); 8443 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 8444 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 8445 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 8446 } 8447 } 8448 8449 // Find the size of memory referenced by the load/store. 8450 EVT VecTy; 8451 if (isLoad) 8452 VecTy = N->getValueType(0); 8453 else 8454 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 8455 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 8456 if (isLaneOp) 8457 NumBytes /= VecTy.getVectorNumElements(); 8458 8459 // If the increment is a constant, it must match the memory ref size. 8460 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 8461 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 8462 uint64_t IncVal = CInc->getZExtValue(); 8463 if (IncVal != NumBytes) 8464 continue; 8465 } else if (NumBytes >= 3 * 16) { 8466 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 8467 // separate instructions that make it harder to use a non-constant update. 8468 continue; 8469 } 8470 8471 // Create the new updating load/store node. 8472 EVT Tys[6]; 8473 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 8474 unsigned n; 8475 for (n = 0; n < NumResultVecs; ++n) 8476 Tys[n] = VecTy; 8477 Tys[n++] = MVT::i32; 8478 Tys[n] = MVT::Other; 8479 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 8480 SmallVector<SDValue, 8> Ops; 8481 Ops.push_back(N->getOperand(0)); // incoming chain 8482 Ops.push_back(N->getOperand(AddrOpIdx)); 8483 Ops.push_back(Inc); 8484 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 8485 Ops.push_back(N->getOperand(i)); 8486 } 8487 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 8488 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 8489 Ops.data(), Ops.size(), 8490 MemInt->getMemoryVT(), 8491 MemInt->getMemOperand()); 8492 8493 // Update the uses. 8494 std::vector<SDValue> NewResults; 8495 for (unsigned i = 0; i < NumResultVecs; ++i) { 8496 NewResults.push_back(SDValue(UpdN.getNode(), i)); 8497 } 8498 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 8499 DCI.CombineTo(N, NewResults); 8500 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 8501 8502 break; 8503 } 8504 return SDValue(); 8505} 8506 8507/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 8508/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 8509/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 8510/// return true. 8511static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 8512 SelectionDAG &DAG = DCI.DAG; 8513 EVT VT = N->getValueType(0); 8514 // vldN-dup instructions only support 64-bit vectors for N > 1. 8515 if (!VT.is64BitVector()) 8516 return false; 8517 8518 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 8519 SDNode *VLD = N->getOperand(0).getNode(); 8520 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 8521 return false; 8522 unsigned NumVecs = 0; 8523 unsigned NewOpc = 0; 8524 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 8525 if (IntNo == Intrinsic::arm_neon_vld2lane) { 8526 NumVecs = 2; 8527 NewOpc = ARMISD::VLD2DUP; 8528 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 8529 NumVecs = 3; 8530 NewOpc = ARMISD::VLD3DUP; 8531 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 8532 NumVecs = 4; 8533 NewOpc = ARMISD::VLD4DUP; 8534 } else { 8535 return false; 8536 } 8537 8538 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 8539 // numbers match the load. 8540 unsigned VLDLaneNo = 8541 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 8542 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8543 UI != UE; ++UI) { 8544 // Ignore uses of the chain result. 8545 if (UI.getUse().getResNo() == NumVecs) 8546 continue; 8547 SDNode *User = *UI; 8548 if (User->getOpcode() != ARMISD::VDUPLANE || 8549 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 8550 return false; 8551 } 8552 8553 // Create the vldN-dup node. 8554 EVT Tys[5]; 8555 unsigned n; 8556 for (n = 0; n < NumVecs; ++n) 8557 Tys[n] = VT; 8558 Tys[n] = MVT::Other; 8559 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 8560 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 8561 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 8562 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 8563 Ops, 2, VLDMemInt->getMemoryVT(), 8564 VLDMemInt->getMemOperand()); 8565 8566 // Update the uses. 
8567 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8568 UI != UE; ++UI) { 8569 unsigned ResNo = UI.getUse().getResNo(); 8570 // Ignore uses of the chain result. 8571 if (ResNo == NumVecs) 8572 continue; 8573 SDNode *User = *UI; 8574 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 8575 } 8576 8577 // Now the vldN-lane intrinsic is dead except for its chain result. 8578 // Update uses of the chain. 8579 std::vector<SDValue> VLDDupResults; 8580 for (unsigned n = 0; n < NumVecs; ++n) 8581 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 8582 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 8583 DCI.CombineTo(VLD, VLDDupResults); 8584 8585 return true; 8586} 8587 8588/// PerformVDUPLANECombine - Target-specific dag combine xforms for 8589/// ARMISD::VDUPLANE. 8590static SDValue PerformVDUPLANECombine(SDNode *N, 8591 TargetLowering::DAGCombinerInfo &DCI) { 8592 SDValue Op = N->getOperand(0); 8593 8594 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 8595 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 8596 if (CombineVLDDUP(N, DCI)) 8597 return SDValue(N, 0); 8598 8599 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 8600 // redundant. Ignore bit_converts for now; element sizes are checked below. 8601 while (Op.getOpcode() == ISD::BITCAST) 8602 Op = Op.getOperand(0); 8603 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 8604 return SDValue(); 8605 8606 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 8607 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 8608 // The canonical VMOV for a zero vector uses a 32-bit element size. 8609 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8610 unsigned EltBits; 8611 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 8612 EltSize = 8; 8613 EVT VT = N->getValueType(0); 8614 if (EltSize > VT.getVectorElementType().getSizeInBits()) 8615 return SDValue(); 8616 8617 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 8618} 8619 8620// isConstVecPow2 - Return true if each vector element is a power of 2, all 8621// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 8622static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 8623{ 8624 integerPart cN; 8625 integerPart c0 = 0; 8626 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 8627 I != E; I++) { 8628 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 8629 if (!C) 8630 return false; 8631 8632 bool isExact; 8633 APFloat APF = C->getValueAPF(); 8634 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 8635 != APFloat::opOK || !isExact) 8636 return false; 8637 8638 c0 = (I == 0) ? cN : c0; 8639 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 8640 return false; 8641 } 8642 C = c0; 8643 return true; 8644} 8645 8646/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 8647/// can replace combinations of VMUL and VCVT (floating-point to integer) 8648/// when the VMUL has a constant operand that is a power of 2. 
8649/// 8650/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 8651/// vmul.f32 d16, d17, d16 8652/// vcvt.s32.f32 d16, d16 8653/// becomes: 8654/// vcvt.s32.f32 d16, d16, #3 8655static SDValue PerformVCVTCombine(SDNode *N, 8656 TargetLowering::DAGCombinerInfo &DCI, 8657 const ARMSubtarget *Subtarget) { 8658 SelectionDAG &DAG = DCI.DAG; 8659 SDValue Op = N->getOperand(0); 8660 8661 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 8662 Op.getOpcode() != ISD::FMUL) 8663 return SDValue(); 8664 8665 uint64_t C; 8666 SDValue N0 = Op->getOperand(0); 8667 SDValue ConstVec = Op->getOperand(1); 8668 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 8669 8670 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 8671 !isConstVecPow2(ConstVec, isSigned, C)) 8672 return SDValue(); 8673 8674 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 8675 Intrinsic::arm_neon_vcvtfp2fxu; 8676 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 8677 N->getValueType(0), 8678 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 8679 DAG.getConstant(Log2_64(C), MVT::i32)); 8680} 8681 8682/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 8683/// can replace combinations of VCVT (integer to floating-point) and VDIV 8684/// when the VDIV has a constant operand that is a power of 2. 8685/// 8686/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 8687/// vcvt.f32.s32 d16, d16 8688/// vdiv.f32 d16, d17, d16 8689/// becomes: 8690/// vcvt.f32.s32 d16, d16, #3 8691static SDValue PerformVDIVCombine(SDNode *N, 8692 TargetLowering::DAGCombinerInfo &DCI, 8693 const ARMSubtarget *Subtarget) { 8694 SelectionDAG &DAG = DCI.DAG; 8695 SDValue Op = N->getOperand(0); 8696 unsigned OpOpcode = Op.getNode()->getOpcode(); 8697 8698 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 8699 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 8700 return SDValue(); 8701 8702 uint64_t C; 8703 SDValue ConstVec = N->getOperand(1); 8704 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 8705 8706 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 8707 !isConstVecPow2(ConstVec, isSigned, C)) 8708 return SDValue(); 8709 8710 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 8711 Intrinsic::arm_neon_vcvtfxu2fp; 8712 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 8713 Op.getValueType(), 8714 DAG.getConstant(IntrinsicOpcode, MVT::i32), 8715 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 8716} 8717 8718/// Getvshiftimm - Check if this is a valid build_vector for the immediate 8719/// operand of a vector shift operation, where all the elements of the 8720/// build_vector must have the same constant integer value. 8721static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 8722 // Ignore bit_converts. 8723 while (Op.getOpcode() == ISD::BITCAST) 8724 Op = Op.getOperand(0); 8725 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 8726 APInt SplatBits, SplatUndef; 8727 unsigned SplatBitSize; 8728 bool HasAnyUndefs; 8729 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 8730 HasAnyUndefs, ElementBits) || 8731 SplatBitSize > ElementBits) 8732 return false; 8733 Cnt = SplatBits.getSExtValue(); 8734 return true; 8735} 8736 8737/// isVShiftLImm - Check if this is a valid build_vector for the immediate 8738/// operand of a vector shift left operation. 
That value must be in the range: 8739/// 0 <= Value < ElementBits for a left shift; or 8740/// 0 <= Value <= ElementBits for a long left shift. 8741static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 8742 assert(VT.isVector() && "vector shift count is not a vector type"); 8743 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 8744 if (! getVShiftImm(Op, ElementBits, Cnt)) 8745 return false; 8746 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 8747} 8748 8749/// isVShiftRImm - Check if this is a valid build_vector for the immediate 8750/// operand of a vector shift right operation. For a shift opcode, the value 8751/// is positive, but for an intrinsic the value count must be negative. The 8752/// absolute value must be in the range: 8753/// 1 <= |Value| <= ElementBits for a right shift; or 8754/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 8755static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 8756 int64_t &Cnt) { 8757 assert(VT.isVector() && "vector shift count is not a vector type"); 8758 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 8759 if (! getVShiftImm(Op, ElementBits, Cnt)) 8760 return false; 8761 if (isIntrinsic) 8762 Cnt = -Cnt; 8763 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 8764} 8765 8766/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 8767static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 8768 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 8769 switch (IntNo) { 8770 default: 8771 // Don't do anything for most intrinsics. 8772 break; 8773 8774 // Vector shifts: check for immediate versions and lower them. 8775 // Note: This is done during DAG combining instead of DAG legalizing because 8776 // the build_vectors for 64-bit vector element shift counts are generally 8777 // not legal, and it is hard to see their values after they get legalized to 8778 // loads from a constant pool. 8779 case Intrinsic::arm_neon_vshifts: 8780 case Intrinsic::arm_neon_vshiftu: 8781 case Intrinsic::arm_neon_vshiftls: 8782 case Intrinsic::arm_neon_vshiftlu: 8783 case Intrinsic::arm_neon_vshiftn: 8784 case Intrinsic::arm_neon_vrshifts: 8785 case Intrinsic::arm_neon_vrshiftu: 8786 case Intrinsic::arm_neon_vrshiftn: 8787 case Intrinsic::arm_neon_vqshifts: 8788 case Intrinsic::arm_neon_vqshiftu: 8789 case Intrinsic::arm_neon_vqshiftsu: 8790 case Intrinsic::arm_neon_vqshiftns: 8791 case Intrinsic::arm_neon_vqshiftnu: 8792 case Intrinsic::arm_neon_vqshiftnsu: 8793 case Intrinsic::arm_neon_vqrshiftns: 8794 case Intrinsic::arm_neon_vqrshiftnu: 8795 case Intrinsic::arm_neon_vqrshiftnsu: { 8796 EVT VT = N->getOperand(1).getValueType(); 8797 int64_t Cnt; 8798 unsigned VShiftOpc = 0; 8799 8800 switch (IntNo) { 8801 case Intrinsic::arm_neon_vshifts: 8802 case Intrinsic::arm_neon_vshiftu: 8803 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 8804 VShiftOpc = ARMISD::VSHL; 8805 break; 8806 } 8807 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 8808 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
8809 ARMISD::VSHRs : ARMISD::VSHRu); 8810 break; 8811 } 8812 return SDValue(); 8813 8814 case Intrinsic::arm_neon_vshiftls: 8815 case Intrinsic::arm_neon_vshiftlu: 8816 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 8817 break; 8818 llvm_unreachable("invalid shift count for vshll intrinsic"); 8819 8820 case Intrinsic::arm_neon_vrshifts: 8821 case Intrinsic::arm_neon_vrshiftu: 8822 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 8823 break; 8824 return SDValue(); 8825 8826 case Intrinsic::arm_neon_vqshifts: 8827 case Intrinsic::arm_neon_vqshiftu: 8828 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 8829 break; 8830 return SDValue(); 8831 8832 case Intrinsic::arm_neon_vqshiftsu: 8833 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 8834 break; 8835 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 8836 8837 case Intrinsic::arm_neon_vshiftn: 8838 case Intrinsic::arm_neon_vrshiftn: 8839 case Intrinsic::arm_neon_vqshiftns: 8840 case Intrinsic::arm_neon_vqshiftnu: 8841 case Intrinsic::arm_neon_vqshiftnsu: 8842 case Intrinsic::arm_neon_vqrshiftns: 8843 case Intrinsic::arm_neon_vqrshiftnu: 8844 case Intrinsic::arm_neon_vqrshiftnsu: 8845 // Narrowing shifts require an immediate right shift. 8846 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 8847 break; 8848 llvm_unreachable("invalid shift count for narrowing vector shift " 8849 "intrinsic"); 8850 8851 default: 8852 llvm_unreachable("unhandled vector shift"); 8853 } 8854 8855 switch (IntNo) { 8856 case Intrinsic::arm_neon_vshifts: 8857 case Intrinsic::arm_neon_vshiftu: 8858 // Opcode already set above. 8859 break; 8860 case Intrinsic::arm_neon_vshiftls: 8861 case Intrinsic::arm_neon_vshiftlu: 8862 if (Cnt == VT.getVectorElementType().getSizeInBits()) 8863 VShiftOpc = ARMISD::VSHLLi; 8864 else 8865 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
8866 ARMISD::VSHLLs : ARMISD::VSHLLu); 8867 break; 8868 case Intrinsic::arm_neon_vshiftn: 8869 VShiftOpc = ARMISD::VSHRN; break; 8870 case Intrinsic::arm_neon_vrshifts: 8871 VShiftOpc = ARMISD::VRSHRs; break; 8872 case Intrinsic::arm_neon_vrshiftu: 8873 VShiftOpc = ARMISD::VRSHRu; break; 8874 case Intrinsic::arm_neon_vrshiftn: 8875 VShiftOpc = ARMISD::VRSHRN; break; 8876 case Intrinsic::arm_neon_vqshifts: 8877 VShiftOpc = ARMISD::VQSHLs; break; 8878 case Intrinsic::arm_neon_vqshiftu: 8879 VShiftOpc = ARMISD::VQSHLu; break; 8880 case Intrinsic::arm_neon_vqshiftsu: 8881 VShiftOpc = ARMISD::VQSHLsu; break; 8882 case Intrinsic::arm_neon_vqshiftns: 8883 VShiftOpc = ARMISD::VQSHRNs; break; 8884 case Intrinsic::arm_neon_vqshiftnu: 8885 VShiftOpc = ARMISD::VQSHRNu; break; 8886 case Intrinsic::arm_neon_vqshiftnsu: 8887 VShiftOpc = ARMISD::VQSHRNsu; break; 8888 case Intrinsic::arm_neon_vqrshiftns: 8889 VShiftOpc = ARMISD::VQRSHRNs; break; 8890 case Intrinsic::arm_neon_vqrshiftnu: 8891 VShiftOpc = ARMISD::VQRSHRNu; break; 8892 case Intrinsic::arm_neon_vqrshiftnsu: 8893 VShiftOpc = ARMISD::VQRSHRNsu; break; 8894 } 8895 8896 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 8897 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 8898 } 8899 8900 case Intrinsic::arm_neon_vshiftins: { 8901 EVT VT = N->getOperand(1).getValueType(); 8902 int64_t Cnt; 8903 unsigned VShiftOpc = 0; 8904 8905 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 8906 VShiftOpc = ARMISD::VSLI; 8907 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 8908 VShiftOpc = ARMISD::VSRI; 8909 else { 8910 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 8911 } 8912 8913 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 8914 N->getOperand(1), N->getOperand(2), 8915 DAG.getConstant(Cnt, MVT::i32)); 8916 } 8917 8918 case Intrinsic::arm_neon_vqrshifts: 8919 case Intrinsic::arm_neon_vqrshiftu: 8920 // No immediate versions of these to check for. 8921 break; 8922 } 8923 8924 return SDValue(); 8925} 8926 8927/// PerformShiftCombine - Checks for immediate versions of vector shifts and 8928/// lowers them. As with the vector shift intrinsics, this is done during DAG 8929/// combining instead of DAG legalizing because the build_vectors for 64-bit 8930/// vector element shift counts are generally not legal, and it is hard to see 8931/// their values after they get legalized to loads from a constant pool. 8932static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 8933 const ARMSubtarget *ST) { 8934 EVT VT = N->getValueType(0); 8935 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 8936 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 8937 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 8938 SDValue N1 = N->getOperand(1); 8939 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 8940 SDValue N0 = N->getOperand(0); 8941 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 8942 DAG.MaskedValueIsZero(N0.getOperand(0), 8943 APInt::getHighBitsSet(32, 16))) 8944 return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1); 8945 } 8946 } 8947 8948 // Nothing to be done for scalar shifts. 
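// For vector shifts, a splatted immediate shift amount can be folded directly;
// e.g. (shl v4i32:x, (build_vector 3,3,3,3)) becomes a single VSHL by #3
// (illustrative; the actual checks follow).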
8949 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8950 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 8951 return SDValue(); 8952 8953 assert(ST->hasNEON() && "unexpected vector shift"); 8954 int64_t Cnt; 8955 8956 switch (N->getOpcode()) { 8957 default: llvm_unreachable("unexpected shift opcode"); 8958 8959 case ISD::SHL: 8960 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 8961 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 8962 DAG.getConstant(Cnt, MVT::i32)); 8963 break; 8964 8965 case ISD::SRA: 8966 case ISD::SRL: 8967 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 8968 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 8969 ARMISD::VSHRs : ARMISD::VSHRu); 8970 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 8971 DAG.getConstant(Cnt, MVT::i32)); 8972 } 8973 } 8974 return SDValue(); 8975} 8976 8977/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 8978/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 8979static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 8980 const ARMSubtarget *ST) { 8981 SDValue N0 = N->getOperand(0); 8982 8983 // Check for sign- and zero-extensions of vector extract operations of 8- 8984 // and 16-bit vector elements. NEON supports these directly. They are 8985 // handled during DAG combining because type legalization will promote them 8986 // to 32-bit types and it is messy to recognize the operations after that. 8987 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 8988 SDValue Vec = N0.getOperand(0); 8989 SDValue Lane = N0.getOperand(1); 8990 EVT VT = N->getValueType(0); 8991 EVT EltVT = N0.getValueType(); 8992 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8993 8994 if (VT == MVT::i32 && 8995 (EltVT == MVT::i8 || EltVT == MVT::i16) && 8996 TLI.isTypeLegal(Vec.getValueType()) && 8997 isa<ConstantSDNode>(Lane)) { 8998 8999 unsigned Opc = 0; 9000 switch (N->getOpcode()) { 9001 default: llvm_unreachable("unexpected opcode"); 9002 case ISD::SIGN_EXTEND: 9003 Opc = ARMISD::VGETLANEs; 9004 break; 9005 case ISD::ZERO_EXTEND: 9006 case ISD::ANY_EXTEND: 9007 Opc = ARMISD::VGETLANEu; 9008 break; 9009 } 9010 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 9011 } 9012 } 9013 9014 return SDValue(); 9015} 9016 9017/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 9018/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 9019static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 9020 const ARMSubtarget *ST) { 9021 // If the target supports NEON, try to use vmax/vmin instructions for f32 9022 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 9023 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 9024 // a NaN; only do the transformation when it matches that behavior. 9025 9026 // For now only do this when using NEON for FP operations; if using VFP, it 9027 // is not obvious that the benefit outweighs the cost of switching to the 9028 // NEON pipeline. 
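  // Illustrative sketch (hypothetical values, not exhaustive): for IR such as
  //   %c = fcmp olt float %x, %y
  //   %r = select i1 %c, float %x, float %y
  // the resulting SELECT_CC is rewritten below to ARMISD::FMIN (selected as
  // NEON vmin.f32), provided %x is known never to be a NaN.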
9029 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 9030 N->getValueType(0) != MVT::f32) 9031 return SDValue(); 9032 9033 SDValue CondLHS = N->getOperand(0); 9034 SDValue CondRHS = N->getOperand(1); 9035 SDValue LHS = N->getOperand(2); 9036 SDValue RHS = N->getOperand(3); 9037 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 9038 9039 unsigned Opcode = 0; 9040 bool IsReversed; 9041 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 9042 IsReversed = false; // x CC y ? x : y 9043 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 9044 IsReversed = true ; // x CC y ? y : x 9045 } else { 9046 return SDValue(); 9047 } 9048 9049 bool IsUnordered; 9050 switch (CC) { 9051 default: break; 9052 case ISD::SETOLT: 9053 case ISD::SETOLE: 9054 case ISD::SETLT: 9055 case ISD::SETLE: 9056 case ISD::SETULT: 9057 case ISD::SETULE: 9058 // If LHS is NaN, an ordered comparison will be false and the result will 9059 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 9060 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 9061 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 9062 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 9063 break; 9064 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 9065 // will return -0, so vmin can only be used for unsafe math or if one of 9066 // the operands is known to be nonzero. 9067 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 9068 !DAG.getTarget().Options.UnsafeFPMath && 9069 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 9070 break; 9071 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 9072 break; 9073 9074 case ISD::SETOGT: 9075 case ISD::SETOGE: 9076 case ISD::SETGT: 9077 case ISD::SETGE: 9078 case ISD::SETUGT: 9079 case ISD::SETUGE: 9080 // If LHS is NaN, an ordered comparison will be false and the result will 9081 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 9082 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 9083 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 9084 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 9085 break; 9086 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 9087 // will return +0, so vmax can only be used for unsafe math or if one of 9088 // the operands is known to be nonzero. 9089 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 9090 !DAG.getTarget().Options.UnsafeFPMath && 9091 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 9092 break; 9093 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 9094 break; 9095 } 9096 9097 if (!Opcode) 9098 return SDValue(); 9099 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 9100} 9101 9102/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 9103SDValue 9104ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 9105 SDValue Cmp = N->getOperand(4); 9106 if (Cmp.getOpcode() != ARMISD::CMPZ) 9107 // Only looking at EQ and NE cases. 
9108 return SDValue(); 9109 9110 EVT VT = N->getValueType(0); 9111 DebugLoc dl = N->getDebugLoc(); 9112 SDValue LHS = Cmp.getOperand(0); 9113 SDValue RHS = Cmp.getOperand(1); 9114 SDValue FalseVal = N->getOperand(0); 9115 SDValue TrueVal = N->getOperand(1); 9116 SDValue ARMcc = N->getOperand(2); 9117 ARMCC::CondCodes CC = 9118 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 9119 9120 // Simplify 9121 // mov r1, r0 9122 // cmp r1, x 9123 // mov r0, y 9124 // moveq r0, x 9125 // to 9126 // cmp r0, x 9127 // movne r0, y 9128 // 9129 // mov r1, r0 9130 // cmp r1, x 9131 // mov r0, x 9132 // movne r0, y 9133 // to 9134 // cmp r0, x 9135 // movne r0, y 9136 /// FIXME: Turn this into a target neutral optimization? 9137 SDValue Res; 9138 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 9139 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 9140 N->getOperand(3), Cmp); 9141 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 9142 SDValue ARMcc; 9143 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 9144 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 9145 N->getOperand(3), NewCmp); 9146 } 9147 9148 if (Res.getNode()) { 9149 APInt KnownZero, KnownOne; 9150 DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne); 9151 // Capture demanded bits information that would be otherwise lost. 9152 if (KnownZero == 0xfffffffe) 9153 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9154 DAG.getValueType(MVT::i1)); 9155 else if (KnownZero == 0xffffff00) 9156 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9157 DAG.getValueType(MVT::i8)); 9158 else if (KnownZero == 0xffff0000) 9159 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9160 DAG.getValueType(MVT::i16)); 9161 } 9162 9163 return Res; 9164} 9165 9166SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 9167 DAGCombinerInfo &DCI) const { 9168 switch (N->getOpcode()) { 9169 default: break; 9170 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 9171 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 9172 case ISD::SUB: return PerformSUBCombine(N, DCI); 9173 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 9174 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 9175 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 9176 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 9177 case ARMISD::BFI: return PerformBFICombine(N, DCI); 9178 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 9179 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 9180 case ISD::STORE: return PerformSTORECombine(N, DCI); 9181 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 9182 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 9183 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 9184 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 9185 case ISD::FP_TO_SINT: 9186 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 9187 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 9188 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 9189 case ISD::SHL: 9190 case ISD::SRA: 9191 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 9192 case ISD::SIGN_EXTEND: 9193 case ISD::ZERO_EXTEND: 9194 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 9195 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 9196 case ARMISD::CMOV: return PerformCMOVCombine(N, 
DCI.DAG);
9197 case ARMISD::VLD2DUP:
9198 case ARMISD::VLD3DUP:
9199 case ARMISD::VLD4DUP:
9200 return CombineBaseUpdate(N, DCI);
9201 case ISD::INTRINSIC_VOID:
9202 case ISD::INTRINSIC_W_CHAIN:
9203 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
9204 case Intrinsic::arm_neon_vld1:
9205 case Intrinsic::arm_neon_vld2:
9206 case Intrinsic::arm_neon_vld3:
9207 case Intrinsic::arm_neon_vld4:
9208 case Intrinsic::arm_neon_vld2lane:
9209 case Intrinsic::arm_neon_vld3lane:
9210 case Intrinsic::arm_neon_vld4lane:
9211 case Intrinsic::arm_neon_vst1:
9212 case Intrinsic::arm_neon_vst2:
9213 case Intrinsic::arm_neon_vst3:
9214 case Intrinsic::arm_neon_vst4:
9215 case Intrinsic::arm_neon_vst2lane:
9216 case Intrinsic::arm_neon_vst3lane:
9217 case Intrinsic::arm_neon_vst4lane:
9218 return CombineBaseUpdate(N, DCI);
9219 default: break;
9220 }
9221 break;
9222 }
9223 return SDValue();
9224}
9225
9226bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
9227 EVT VT) const {
9228 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
9229}
9230
9231bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
9232 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
9233 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
9234
9235 switch (VT.getSimpleVT().SimpleTy) {
9236 default:
9237 return false;
9238 case MVT::i8:
9239 case MVT::i16:
9240 case MVT::i32:
9241 // Unaligned access can use (for example) LDRB, LDRH, LDR.
9242 return AllowsUnaligned;
9243 case MVT::f64:
9244 case MVT::v2f64:
9245 // For any little-endian targets with NEON, we can support unaligned ld/st
9246 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
9247 // A big-endian target may also explicitly support unaligned accesses.
9248 return Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian());
9249 }
9250}
9251
9252static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
9253 unsigned AlignCheck) {
9254 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
9255 (DstAlign == 0 || DstAlign % AlignCheck == 0));
9256}
9257
9258EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
9259 unsigned DstAlign, unsigned SrcAlign,
9260 bool IsZeroVal,
9261 bool MemcpyStrSrc,
9262 MachineFunction &MF) const {
9263 const Function *F = MF.getFunction();
9264
9265 // See if we can use NEON instructions for this...
9266 if (IsZeroVal &&
9267 !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat) &&
9268 Subtarget->hasNEON()) {
9269 if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) {
9270 return MVT::v4i32;
9271 } else if (memOpAlign(SrcAlign, DstAlign, 8) && Size >= 8) {
9272 return MVT::v2i32;
9273 }
9274 }
9275
9276 // Lower to i32/i16 if the size permits.
9277 if (Size >= 4) {
9278 return MVT::i32;
9279 } else if (Size >= 2) {
9280 return MVT::i16;
9281 }
9282
9283 // Let the target-independent logic figure it out.
9284 return MVT::Other; 9285} 9286 9287static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 9288 if (V < 0) 9289 return false; 9290 9291 unsigned Scale = 1; 9292 switch (VT.getSimpleVT().SimpleTy) { 9293 default: return false; 9294 case MVT::i1: 9295 case MVT::i8: 9296 // Scale == 1; 9297 break; 9298 case MVT::i16: 9299 // Scale == 2; 9300 Scale = 2; 9301 break; 9302 case MVT::i32: 9303 // Scale == 4; 9304 Scale = 4; 9305 break; 9306 } 9307 9308 if ((V & (Scale - 1)) != 0) 9309 return false; 9310 V /= Scale; 9311 return V == (V & ((1LL << 5) - 1)); 9312} 9313 9314static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 9315 const ARMSubtarget *Subtarget) { 9316 bool isNeg = false; 9317 if (V < 0) { 9318 isNeg = true; 9319 V = - V; 9320 } 9321 9322 switch (VT.getSimpleVT().SimpleTy) { 9323 default: return false; 9324 case MVT::i1: 9325 case MVT::i8: 9326 case MVT::i16: 9327 case MVT::i32: 9328 // + imm12 or - imm8 9329 if (isNeg) 9330 return V == (V & ((1LL << 8) - 1)); 9331 return V == (V & ((1LL << 12) - 1)); 9332 case MVT::f32: 9333 case MVT::f64: 9334 // Same as ARM mode. FIXME: NEON? 9335 if (!Subtarget->hasVFP2()) 9336 return false; 9337 if ((V & 3) != 0) 9338 return false; 9339 V >>= 2; 9340 return V == (V & ((1LL << 8) - 1)); 9341 } 9342} 9343 9344/// isLegalAddressImmediate - Return true if the integer value can be used 9345/// as the offset of the target addressing mode for load / store of the 9346/// given type. 9347static bool isLegalAddressImmediate(int64_t V, EVT VT, 9348 const ARMSubtarget *Subtarget) { 9349 if (V == 0) 9350 return true; 9351 9352 if (!VT.isSimple()) 9353 return false; 9354 9355 if (Subtarget->isThumb1Only()) 9356 return isLegalT1AddressImmediate(V, VT); 9357 else if (Subtarget->isThumb2()) 9358 return isLegalT2AddressImmediate(V, VT, Subtarget); 9359 9360 // ARM mode. 9361 if (V < 0) 9362 V = - V; 9363 switch (VT.getSimpleVT().SimpleTy) { 9364 default: return false; 9365 case MVT::i1: 9366 case MVT::i8: 9367 case MVT::i32: 9368 // +- imm12 9369 return V == (V & ((1LL << 12) - 1)); 9370 case MVT::i16: 9371 // +- imm8 9372 return V == (V & ((1LL << 8) - 1)); 9373 case MVT::f32: 9374 case MVT::f64: 9375 if (!Subtarget->hasVFP2()) // FIXME: NEON? 9376 return false; 9377 if ((V & 3) != 0) 9378 return false; 9379 V >>= 2; 9380 return V == (V & ((1LL << 8) - 1)); 9381 } 9382} 9383 9384bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 9385 EVT VT) const { 9386 int Scale = AM.Scale; 9387 if (Scale < 0) 9388 return false; 9389 9390 switch (VT.getSimpleVT().SimpleTy) { 9391 default: return false; 9392 case MVT::i1: 9393 case MVT::i8: 9394 case MVT::i16: 9395 case MVT::i32: 9396 if (Scale == 1) 9397 return true; 9398 // r + r << imm 9399 Scale = Scale & ~1; 9400 return Scale == 2 || Scale == 4 || Scale == 8; 9401 case MVT::i64: 9402 // r + r 9403 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9404 return true; 9405 return false; 9406 case MVT::isVoid: 9407 // Note, we allow "void" uses (basically, uses that aren't loads or 9408 // stores), because arm allows folding a scale into many arithmetic 9409 // operations. This should be made more precise and revisited later. 9410 9411 // Allow r << imm, but the imm has to be a multiple of two. 9412 if (Scale & 1) return false; 9413 return isPowerOf2_32(Scale); 9414 } 9415} 9416 9417/// isLegalAddressingMode - Return true if the addressing mode represented 9418/// by AM is legal for this target, for a load/store of the specified type. 
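/// Illustrative examples (not exhaustive): in ARM mode an i32 access can fold
/// a base register plus an immediate in [-4095, 4095] (e.g. [r0, #8]) or a
/// base plus a register scaled by a power of two (e.g. [r0, r1, lsl #2]);
/// Thumb1 is limited to small, positive, size-scaled immediates, and no mode
/// may fold the address of a global into the access.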
9419bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 9420 Type *Ty) const { 9421 EVT VT = getValueType(Ty, true); 9422 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 9423 return false; 9424 9425 // Can never fold addr of global into load/store. 9426 if (AM.BaseGV) 9427 return false; 9428 9429 switch (AM.Scale) { 9430 case 0: // no scale reg, must be "r+i" or "r", or "i". 9431 break; 9432 case 1: 9433 if (Subtarget->isThumb1Only()) 9434 return false; 9435 // FALL THROUGH. 9436 default: 9437 // ARM doesn't support any R+R*scale+imm addr modes. 9438 if (AM.BaseOffs) 9439 return false; 9440 9441 if (!VT.isSimple()) 9442 return false; 9443 9444 if (Subtarget->isThumb2()) 9445 return isLegalT2ScaledAddressingMode(AM, VT); 9446 9447 int Scale = AM.Scale; 9448 switch (VT.getSimpleVT().SimpleTy) { 9449 default: return false; 9450 case MVT::i1: 9451 case MVT::i8: 9452 case MVT::i32: 9453 if (Scale < 0) Scale = -Scale; 9454 if (Scale == 1) 9455 return true; 9456 // r + r << imm 9457 return isPowerOf2_32(Scale & ~1); 9458 case MVT::i16: 9459 case MVT::i64: 9460 // r + r 9461 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9462 return true; 9463 return false; 9464 9465 case MVT::isVoid: 9466 // Note, we allow "void" uses (basically, uses that aren't loads or 9467 // stores), because arm allows folding a scale into many arithmetic 9468 // operations. This should be made more precise and revisited later. 9469 9470 // Allow r << imm, but the imm has to be a multiple of two. 9471 if (Scale & 1) return false; 9472 return isPowerOf2_32(Scale); 9473 } 9474 } 9475 return true; 9476} 9477 9478/// isLegalICmpImmediate - Return true if the specified immediate is legal 9479/// icmp immediate, that is the target has icmp instructions which can compare 9480/// a register against the immediate without having to materialize the 9481/// immediate into a register. 9482bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 9483 // Thumb2 and ARM modes can use cmn for negative immediates. 9484 if (!Subtarget->isThumb()) 9485 return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1; 9486 if (Subtarget->isThumb2()) 9487 return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1; 9488 // Thumb1 doesn't have cmn, and only 8-bit immediates. 9489 return Imm >= 0 && Imm <= 255; 9490} 9491 9492/// isLegalAddImmediate - Return true if the specified immediate is a legal add 9493/// *or sub* immediate, that is the target has add or sub instructions which can 9494/// add a register with the immediate without having to materialize the 9495/// immediate into a register. 9496bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 9497 // Same encoding for add/sub, just flip the sign. 9498 int64_t AbsImm = llvm::abs64(Imm); 9499 if (!Subtarget->isThumb()) 9500 return ARM_AM::getSOImmVal(AbsImm) != -1; 9501 if (Subtarget->isThumb2()) 9502 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 9503 // Thumb1 only has 8-bit unsigned immediate. 
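  // For example (illustrative): "adds r0, #300" is not encodable in Thumb1,
  // so returning false here means such an immediate gets materialized into a
  // register first.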
9504 return AbsImm >= 0 && AbsImm <= 255; 9505} 9506 9507static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 9508 bool isSEXTLoad, SDValue &Base, 9509 SDValue &Offset, bool &isInc, 9510 SelectionDAG &DAG) { 9511 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9512 return false; 9513 9514 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 9515 // AddressingMode 3 9516 Base = Ptr->getOperand(0); 9517 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9518 int RHSC = (int)RHS->getZExtValue(); 9519 if (RHSC < 0 && RHSC > -256) { 9520 assert(Ptr->getOpcode() == ISD::ADD); 9521 isInc = false; 9522 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9523 return true; 9524 } 9525 } 9526 isInc = (Ptr->getOpcode() == ISD::ADD); 9527 Offset = Ptr->getOperand(1); 9528 return true; 9529 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 9530 // AddressingMode 2 9531 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9532 int RHSC = (int)RHS->getZExtValue(); 9533 if (RHSC < 0 && RHSC > -0x1000) { 9534 assert(Ptr->getOpcode() == ISD::ADD); 9535 isInc = false; 9536 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9537 Base = Ptr->getOperand(0); 9538 return true; 9539 } 9540 } 9541 9542 if (Ptr->getOpcode() == ISD::ADD) { 9543 isInc = true; 9544 ARM_AM::ShiftOpc ShOpcVal= 9545 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 9546 if (ShOpcVal != ARM_AM::no_shift) { 9547 Base = Ptr->getOperand(1); 9548 Offset = Ptr->getOperand(0); 9549 } else { 9550 Base = Ptr->getOperand(0); 9551 Offset = Ptr->getOperand(1); 9552 } 9553 return true; 9554 } 9555 9556 isInc = (Ptr->getOpcode() == ISD::ADD); 9557 Base = Ptr->getOperand(0); 9558 Offset = Ptr->getOperand(1); 9559 return true; 9560 } 9561 9562 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 9563 return false; 9564} 9565 9566static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 9567 bool isSEXTLoad, SDValue &Base, 9568 SDValue &Offset, bool &isInc, 9569 SelectionDAG &DAG) { 9570 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9571 return false; 9572 9573 Base = Ptr->getOperand(0); 9574 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9575 int RHSC = (int)RHS->getZExtValue(); 9576 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 9577 assert(Ptr->getOpcode() == ISD::ADD); 9578 isInc = false; 9579 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9580 return true; 9581 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 9582 isInc = Ptr->getOpcode() == ISD::ADD; 9583 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 9584 return true; 9585 } 9586 } 9587 9588 return false; 9589} 9590 9591/// getPreIndexedAddressParts - returns true by value, base pointer and 9592/// offset pointer and addressing mode by reference if the node's address 9593/// can be legally represented as pre-indexed load / store address. 
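/// A minimal sketch of what this enables (illustrative): the pair
///   ldr r0, [r1, #4]
///   add r1, r1, #4
/// can be selected as the single pre-indexed form
///   ldr r0, [r1, #4]!
/// when the offset satisfies the checks in the helpers above.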
9594bool 9595ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 9596 SDValue &Offset, 9597 ISD::MemIndexedMode &AM, 9598 SelectionDAG &DAG) const { 9599 if (Subtarget->isThumb1Only()) 9600 return false; 9601 9602 EVT VT; 9603 SDValue Ptr; 9604 bool isSEXTLoad = false; 9605 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 9606 Ptr = LD->getBasePtr(); 9607 VT = LD->getMemoryVT(); 9608 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 9609 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 9610 Ptr = ST->getBasePtr(); 9611 VT = ST->getMemoryVT(); 9612 } else 9613 return false; 9614 9615 bool isInc; 9616 bool isLegal = false; 9617 if (Subtarget->isThumb2()) 9618 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 9619 Offset, isInc, DAG); 9620 else 9621 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 9622 Offset, isInc, DAG); 9623 if (!isLegal) 9624 return false; 9625 9626 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 9627 return true; 9628} 9629 9630/// getPostIndexedAddressParts - returns true by value, base pointer and 9631/// offset pointer and addressing mode by reference if this node can be 9632/// combined with a load / store to form a post-indexed load / store. 9633bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 9634 SDValue &Base, 9635 SDValue &Offset, 9636 ISD::MemIndexedMode &AM, 9637 SelectionDAG &DAG) const { 9638 if (Subtarget->isThumb1Only()) 9639 return false; 9640 9641 EVT VT; 9642 SDValue Ptr; 9643 bool isSEXTLoad = false; 9644 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 9645 VT = LD->getMemoryVT(); 9646 Ptr = LD->getBasePtr(); 9647 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 9648 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 9649 VT = ST->getMemoryVT(); 9650 Ptr = ST->getBasePtr(); 9651 } else 9652 return false; 9653 9654 bool isInc; 9655 bool isLegal = false; 9656 if (Subtarget->isThumb2()) 9657 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 9658 isInc, DAG); 9659 else 9660 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 9661 isInc, DAG); 9662 if (!isLegal) 9663 return false; 9664 9665 if (Ptr != Base) { 9666 // Swap base ptr and offset to catch more post-index load / store when 9667 // it's legal. In Thumb2 mode, offset must be an immediate. 9668 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 9669 !Subtarget->isThumb2()) 9670 std::swap(Base, Offset); 9671 9672 // Post-indexed load / store update the base pointer. 9673 if (Ptr != Base) 9674 return false; 9675 } 9676 9677 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 9678 return true; 9679} 9680 9681void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 9682 APInt &KnownZero, 9683 APInt &KnownOne, 9684 const SelectionDAG &DAG, 9685 unsigned Depth) const { 9686 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); 9687 switch (Op.getOpcode()) { 9688 default: break; 9689 case ARMISD::CMOV: { 9690 // Bits are known zero/one if known on the LHS and RHS. 
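    // For example (illustrative): if both CMOV operands are zero-extended i8
    // values, each has its top 24 bits known zero, so the intersection
    // computed below reports those bits as zero for the CMOV result too.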
9691 DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 9692 if (KnownZero == 0 && KnownOne == 0) return; 9693 9694 APInt KnownZeroRHS, KnownOneRHS; 9695 DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1); 9696 KnownZero &= KnownZeroRHS; 9697 KnownOne &= KnownOneRHS; 9698 return; 9699 } 9700 } 9701} 9702 9703//===----------------------------------------------------------------------===// 9704// ARM Inline Assembly Support 9705//===----------------------------------------------------------------------===// 9706 9707bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 9708 // Looking for "rev" which is V6+. 9709 if (!Subtarget->hasV6Ops()) 9710 return false; 9711 9712 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 9713 std::string AsmStr = IA->getAsmString(); 9714 SmallVector<StringRef, 4> AsmPieces; 9715 SplitString(AsmStr, AsmPieces, ";\n"); 9716 9717 switch (AsmPieces.size()) { 9718 default: return false; 9719 case 1: 9720 AsmStr = AsmPieces[0]; 9721 AsmPieces.clear(); 9722 SplitString(AsmStr, AsmPieces, " \t,"); 9723 9724 // rev $0, $1 9725 if (AsmPieces.size() == 3 && 9726 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 9727 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 9728 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 9729 if (Ty && Ty->getBitWidth() == 32) 9730 return IntrinsicLowering::LowerToByteSwap(CI); 9731 } 9732 break; 9733 } 9734 9735 return false; 9736} 9737 9738/// getConstraintType - Given a constraint letter, return the type of 9739/// constraint it is for this target. 9740ARMTargetLowering::ConstraintType 9741ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 9742 if (Constraint.size() == 1) { 9743 switch (Constraint[0]) { 9744 default: break; 9745 case 'l': return C_RegisterClass; 9746 case 'w': return C_RegisterClass; 9747 case 'h': return C_RegisterClass; 9748 case 'x': return C_RegisterClass; 9749 case 't': return C_RegisterClass; 9750 case 'j': return C_Other; // Constant for movw. 9751 // An address with a single base register. Due to the way we 9752 // currently handle addresses it is the same as an 'r' memory constraint. 9753 case 'Q': return C_Memory; 9754 } 9755 } else if (Constraint.size() == 2) { 9756 switch (Constraint[0]) { 9757 default: break; 9758 // All 'U+' constraints are addresses. 9759 case 'U': return C_Memory; 9760 } 9761 } 9762 return TargetLowering::getConstraintType(Constraint); 9763} 9764 9765/// Examine constraint type and operand type and determine a weight value. 9766/// This object must already have been set up with the operand type 9767/// and the current alternative constraint selected. 9768TargetLowering::ConstraintWeight 9769ARMTargetLowering::getSingleConstraintMatchWeight( 9770 AsmOperandInfo &info, const char *constraint) const { 9771 ConstraintWeight weight = CW_Invalid; 9772 Value *CallOperandVal = info.CallOperandVal; 9773 // If we don't have a value, we can't do a match, 9774 // but allow it at the lowest weight. 9775 if (CallOperandVal == NULL) 9776 return CW_Default; 9777 Type *type = CallOperandVal->getType(); 9778 // Look at the constraint type. 
9779 switch (*constraint) { 9780 default: 9781 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 9782 break; 9783 case 'l': 9784 if (type->isIntegerTy()) { 9785 if (Subtarget->isThumb()) 9786 weight = CW_SpecificReg; 9787 else 9788 weight = CW_Register; 9789 } 9790 break; 9791 case 'w': 9792 if (type->isFloatingPointTy()) 9793 weight = CW_Register; 9794 break; 9795 } 9796 return weight; 9797} 9798 9799typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 9800RCPair 9801ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 9802 EVT VT) const { 9803 if (Constraint.size() == 1) { 9804 // GCC ARM Constraint Letters 9805 switch (Constraint[0]) { 9806 case 'l': // Low regs or general regs. 9807 if (Subtarget->isThumb()) 9808 return RCPair(0U, &ARM::tGPRRegClass); 9809 return RCPair(0U, &ARM::GPRRegClass); 9810 case 'h': // High regs or no regs. 9811 if (Subtarget->isThumb()) 9812 return RCPair(0U, &ARM::hGPRRegClass); 9813 break; 9814 case 'r': 9815 return RCPair(0U, &ARM::GPRRegClass); 9816 case 'w': 9817 if (VT == MVT::f32) 9818 return RCPair(0U, &ARM::SPRRegClass); 9819 if (VT.getSizeInBits() == 64) 9820 return RCPair(0U, &ARM::DPRRegClass); 9821 if (VT.getSizeInBits() == 128) 9822 return RCPair(0U, &ARM::QPRRegClass); 9823 break; 9824 case 'x': 9825 if (VT == MVT::f32) 9826 return RCPair(0U, &ARM::SPR_8RegClass); 9827 if (VT.getSizeInBits() == 64) 9828 return RCPair(0U, &ARM::DPR_8RegClass); 9829 if (VT.getSizeInBits() == 128) 9830 return RCPair(0U, &ARM::QPR_8RegClass); 9831 break; 9832 case 't': 9833 if (VT == MVT::f32) 9834 return RCPair(0U, &ARM::SPRRegClass); 9835 break; 9836 } 9837 } 9838 if (StringRef("{cc}").equals_lower(Constraint)) 9839 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 9840 9841 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 9842} 9843 9844/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 9845/// vector. If it is invalid, don't add anything to Ops. 9846void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 9847 std::string &Constraint, 9848 std::vector<SDValue>&Ops, 9849 SelectionDAG &DAG) const { 9850 SDValue Result(0, 0); 9851 9852 // Currently only support length 1 constraints. 9853 if (Constraint.length() != 1) return; 9854 9855 char ConstraintLetter = Constraint[0]; 9856 switch (ConstraintLetter) { 9857 default: break; 9858 case 'j': 9859 case 'I': case 'J': case 'K': case 'L': 9860 case 'M': case 'N': case 'O': 9861 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 9862 if (!C) 9863 return; 9864 9865 int64_t CVal64 = C->getSExtValue(); 9866 int CVal = (int) CVal64; 9867 // None of these constraints allow values larger than 32 bits. Check 9868 // that the value fits in an int. 9869 if (CVal != CVal64) 9870 return; 9871 9872 switch (ConstraintLetter) { 9873 case 'j': 9874 // Constant suitable for movw, must be between 0 and 9875 // 65535. 9876 if (Subtarget->hasV6T2Ops()) 9877 if (CVal >= 0 && CVal <= 65535) 9878 break; 9879 return; 9880 case 'I': 9881 if (Subtarget->isThumb1Only()) { 9882 // This must be a constant between 0 and 255, for ADD 9883 // immediates. 9884 if (CVal >= 0 && CVal <= 255) 9885 break; 9886 } else if (Subtarget->isThumb2()) { 9887 // A constant that can be used as an immediate value in a 9888 // data-processing instruction. 9889 if (ARM_AM::getT2SOImmVal(CVal) != -1) 9890 break; 9891 } else { 9892 // A constant that can be used as an immediate value in a 9893 // data-processing instruction. 
9894 if (ARM_AM::getSOImmVal(CVal) != -1) 9895 break; 9896 } 9897 return; 9898 9899 case 'J': 9900 if (Subtarget->isThumb()) { // FIXME thumb2 9901 // This must be a constant between -255 and -1, for negated ADD 9902 // immediates. This can be used in GCC with an "n" modifier that 9903 // prints the negated value, for use with SUB instructions. It is 9904 // not useful otherwise but is implemented for compatibility. 9905 if (CVal >= -255 && CVal <= -1) 9906 break; 9907 } else { 9908 // This must be a constant between -4095 and 4095. It is not clear 9909 // what this constraint is intended for. Implemented for 9910 // compatibility with GCC. 9911 if (CVal >= -4095 && CVal <= 4095) 9912 break; 9913 } 9914 return; 9915 9916 case 'K': 9917 if (Subtarget->isThumb1Only()) { 9918 // A 32-bit value where only one byte has a nonzero value. Exclude 9919 // zero to match GCC. This constraint is used by GCC internally for 9920 // constants that can be loaded with a move/shift combination. 9921 // It is not useful otherwise but is implemented for compatibility. 9922 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 9923 break; 9924 } else if (Subtarget->isThumb2()) { 9925 // A constant whose bitwise inverse can be used as an immediate 9926 // value in a data-processing instruction. This can be used in GCC 9927 // with a "B" modifier that prints the inverted value, for use with 9928 // BIC and MVN instructions. It is not useful otherwise but is 9929 // implemented for compatibility. 9930 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 9931 break; 9932 } else { 9933 // A constant whose bitwise inverse can be used as an immediate 9934 // value in a data-processing instruction. This can be used in GCC 9935 // with a "B" modifier that prints the inverted value, for use with 9936 // BIC and MVN instructions. It is not useful otherwise but is 9937 // implemented for compatibility. 9938 if (ARM_AM::getSOImmVal(~CVal) != -1) 9939 break; 9940 } 9941 return; 9942 9943 case 'L': 9944 if (Subtarget->isThumb1Only()) { 9945 // This must be a constant between -7 and 7, 9946 // for 3-operand ADD/SUB immediate instructions. 9947 if (CVal >= -7 && CVal < 7) 9948 break; 9949 } else if (Subtarget->isThumb2()) { 9950 // A constant whose negation can be used as an immediate value in a 9951 // data-processing instruction. This can be used in GCC with an "n" 9952 // modifier that prints the negated value, for use with SUB 9953 // instructions. It is not useful otherwise but is implemented for 9954 // compatibility. 9955 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 9956 break; 9957 } else { 9958 // A constant whose negation can be used as an immediate value in a 9959 // data-processing instruction. This can be used in GCC with an "n" 9960 // modifier that prints the negated value, for use with SUB 9961 // instructions. It is not useful otherwise but is implemented for 9962 // compatibility. 9963 if (ARM_AM::getSOImmVal(-CVal) != -1) 9964 break; 9965 } 9966 return; 9967 9968 case 'M': 9969 if (Subtarget->isThumb()) { // FIXME thumb2 9970 // This must be a multiple of 4 between 0 and 1020, for 9971 // ADD sp + immediate. 9972 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 9973 break; 9974 } else { 9975 // A power of two or a constant between 0 and 32. This is used in 9976 // GCC for the shift amount on shifted register operands, but it is 9977 // useful in general for any shift amounts. 
9978 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 9979 break; 9980 } 9981 return; 9982 9983 case 'N': 9984 if (Subtarget->isThumb()) { // FIXME thumb2 9985 // This must be a constant between 0 and 31, for shift amounts. 9986 if (CVal >= 0 && CVal <= 31) 9987 break; 9988 } 9989 return; 9990 9991 case 'O': 9992 if (Subtarget->isThumb()) { // FIXME thumb2 9993 // This must be a multiple of 4 between -508 and 508, for 9994 // ADD/SUB sp = sp + immediate. 9995 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 9996 break; 9997 } 9998 return; 9999 } 10000 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 10001 break; 10002 } 10003 10004 if (Result.getNode()) { 10005 Ops.push_back(Result); 10006 return; 10007 } 10008 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 10009} 10010 10011bool 10012ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 10013 // The ARM target isn't yet aware of offsets. 10014 return false; 10015} 10016 10017bool ARM::isBitFieldInvertedMask(unsigned v) { 10018 if (v == 0xffffffff) 10019 return 0; 10020 // there can be 1's on either or both "outsides", all the "inside" 10021 // bits must be 0's 10022 unsigned int lsb = 0, msb = 31; 10023 while (v & (1 << msb)) --msb; 10024 while (v & (1 << lsb)) ++lsb; 10025 for (unsigned int i = lsb; i <= msb; ++i) { 10026 if (v & (1 << i)) 10027 return 0; 10028 } 10029 return 1; 10030} 10031 10032/// isFPImmLegal - Returns true if the target can instruction select the 10033/// specified FP immediate natively. If false, the legalizer will 10034/// materialize the FP immediate as a load from a constant pool. 10035bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 10036 if (!Subtarget->hasVFP3()) 10037 return false; 10038 if (VT == MVT::f32) 10039 return ARM_AM::getFP32Imm(Imm) != -1; 10040 if (VT == MVT::f64) 10041 return ARM_AM::getFP64Imm(Imm) != -1; 10042 return false; 10043} 10044 10045/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 10046/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 10047/// specified in the intrinsic calls. 10048bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 10049 const CallInst &I, 10050 unsigned Intrinsic) const { 10051 switch (Intrinsic) { 10052 case Intrinsic::arm_neon_vld1: 10053 case Intrinsic::arm_neon_vld2: 10054 case Intrinsic::arm_neon_vld3: 10055 case Intrinsic::arm_neon_vld4: 10056 case Intrinsic::arm_neon_vld2lane: 10057 case Intrinsic::arm_neon_vld3lane: 10058 case Intrinsic::arm_neon_vld4lane: { 10059 Info.opc = ISD::INTRINSIC_W_CHAIN; 10060 // Conservatively set memVT to the entire set of vectors loaded. 
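    // Worked example (illustrative): arm.neon.vld3 returning three <4 x i32>
    // vectors covers 48 bytes, so NumElts below is 48 / 8 = 6 and memVT
    // becomes v6i64, wide enough to cover everything the intrinsic may read.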
10061 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 10062 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 10063 Info.ptrVal = I.getArgOperand(0); 10064 Info.offset = 0; 10065 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 10066 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 10067 Info.vol = false; // volatile loads with NEON intrinsics not supported 10068 Info.readMem = true; 10069 Info.writeMem = false; 10070 return true; 10071 } 10072 case Intrinsic::arm_neon_vst1: 10073 case Intrinsic::arm_neon_vst2: 10074 case Intrinsic::arm_neon_vst3: 10075 case Intrinsic::arm_neon_vst4: 10076 case Intrinsic::arm_neon_vst2lane: 10077 case Intrinsic::arm_neon_vst3lane: 10078 case Intrinsic::arm_neon_vst4lane: { 10079 Info.opc = ISD::INTRINSIC_VOID; 10080 // Conservatively set memVT to the entire set of vectors stored. 10081 unsigned NumElts = 0; 10082 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 10083 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 10084 if (!ArgTy->isVectorTy()) 10085 break; 10086 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 10087 } 10088 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 10089 Info.ptrVal = I.getArgOperand(0); 10090 Info.offset = 0; 10091 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 10092 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 10093 Info.vol = false; // volatile stores with NEON intrinsics not supported 10094 Info.readMem = false; 10095 Info.writeMem = true; 10096 return true; 10097 } 10098 case Intrinsic::arm_strexd: { 10099 Info.opc = ISD::INTRINSIC_W_CHAIN; 10100 Info.memVT = MVT::i64; 10101 Info.ptrVal = I.getArgOperand(2); 10102 Info.offset = 0; 10103 Info.align = 8; 10104 Info.vol = true; 10105 Info.readMem = false; 10106 Info.writeMem = true; 10107 return true; 10108 } 10109 case Intrinsic::arm_ldrexd: { 10110 Info.opc = ISD::INTRINSIC_W_CHAIN; 10111 Info.memVT = MVT::i64; 10112 Info.ptrVal = I.getArgOperand(0); 10113 Info.offset = 0; 10114 Info.align = 8; 10115 Info.vol = true; 10116 Info.readMem = true; 10117 Info.writeMem = false; 10118 return true; 10119 } 10120 default: 10121 break; 10122 } 10123 10124 return false; 10125} 10126