ARMISelLowering.cpp revision 26b4f62e52845638a6e353b58ea72326a0aa7b06
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
                   cl::desc("Generate tail calls (TEMPORARY OPTION)."),
                   cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
                   cl::desc("Generate calls via indirect call instructions"),
                   cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
  maxStoresPerMemset = 16;
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
  case ARMISD::VSHRN: return "ARMISD::VSHRN";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX: return "ARMISD::FMAX";
  case ARMISD::FMIN: return "ARMISD::FMIN";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
1332 if (CCInfo.isFirstByValRegValid()) { 1333 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1334 unsigned int i, j; 1335 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1336 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1337 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1338 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1339 MachinePointerInfo(), 1340 false, false, 0); 1341 MemOpChains.push_back(Load.getValue(1)); 1342 RegsToPass.push_back(std::make_pair(j, Load)); 1343 } 1344 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1345 CCInfo.clearFirstByValReg(); 1346 } 1347 1348 unsigned LocMemOffset = VA.getLocMemOffset(); 1349 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1350 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1351 StkPtrOff); 1352 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1353 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1354 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1355 MVT::i32); 1356 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1357 Flags.getByValAlign(), 1358 /*isVolatile=*/false, 1359 /*AlwaysInline=*/false, 1360 MachinePointerInfo(0), 1361 MachinePointerInfo(0))); 1362 1363 } else if (!IsSibCall) { 1364 assert(VA.isMemLoc()); 1365 1366 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1367 dl, DAG, VA, Flags)); 1368 } 1369 } 1370 1371 if (!MemOpChains.empty()) 1372 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1373 &MemOpChains[0], MemOpChains.size()); 1374 1375 // Build a sequence of copy-to-reg nodes chained together with token chain 1376 // and flag operands which copy the outgoing args into the appropriate regs. 1377 SDValue InFlag; 1378 // Tail call byval lowering might overwrite argument registers so in case of 1379 // tail call optimization the copies to registers are lowered later. 1380 if (!isTailCall) 1381 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1382 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1383 RegsToPass[i].second, InFlag); 1384 InFlag = Chain.getValue(1); 1385 } 1386 1387 // For tail calls lower the arguments to the 'real' stack slot. 1388 if (isTailCall) { 1389 // Force all the incoming stack arguments to be loaded from the stack 1390 // before any new outgoing arguments are stored to the stack, because the 1391 // outgoing stack slots may alias the incoming argument stack slots, and 1392 // the alias isn't otherwise explicit. This is slightly more conservative 1393 // than necessary, because it means that each store effectively depends 1394 // on every argument instead of just those arguments it would clobber. 1395 1396 // Do not flag preceding copytoreg stuff together with the following stuff. 1397 InFlag = SDValue(); 1398 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1399 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1400 RegsToPass[i].second, InFlag); 1401 InFlag = Chain.getValue(1); 1402 } 1403 InFlag =SDValue(); 1404 } 1405 1406 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1407 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1408 // node so that legalize doesn't hack it. 
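  // The cases below distinguish three situations: "long calls", where the
  // callee address is always materialized from the constant pool; direct
  // calls to a GlobalValue; and direct calls to an external symbol. Anything
  // else is an indirect call whose target is already in a register.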
1409 bool isDirect = false; 1410 bool isARMFunc = false; 1411 bool isLocalARMFunc = false; 1412 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1413 1414 if (EnableARMLongCalls) { 1415 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1416 && "long-calls with non-static relocation model!"); 1417 // Handle a global address or an external symbol. If it's not one of 1418 // those, the target's already in a register, so we don't need to do 1419 // anything extra. 1420 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1421 const GlobalValue *GV = G->getGlobal(); 1422 // Create a constant pool entry for the callee address 1423 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1424 ARMConstantPoolValue *CPV = 1425 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1426 1427 // Get the address of the callee into a register 1428 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1429 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1430 Callee = DAG.getLoad(getPointerTy(), dl, 1431 DAG.getEntryNode(), CPAddr, 1432 MachinePointerInfo::getConstantPool(), 1433 false, false, 0); 1434 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1435 const char *Sym = S->getSymbol(); 1436 1437 // Create a constant pool entry for the callee address 1438 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1439 ARMConstantPoolValue *CPV = 1440 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1441 ARMPCLabelIndex, 0); 1442 // Get the address of the callee into a register 1443 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1444 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1445 Callee = DAG.getLoad(getPointerTy(), dl, 1446 DAG.getEntryNode(), CPAddr, 1447 MachinePointerInfo::getConstantPool(), 1448 false, false, 0); 1449 } 1450 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1451 const GlobalValue *GV = G->getGlobal(); 1452 isDirect = true; 1453 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1454 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1455 getTargetMachine().getRelocationModel() != Reloc::Static; 1456 isARMFunc = !Subtarget->isThumb() || isStub; 1457 // ARM call to a local ARM function is predicable. 1458 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1459 // tBX takes a register source operand. 
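    // So for Thumb1 without v5T support, materialize the callee address in a
    // register instead of emitting a direct BL: load it from the constant
    // pool, add the PIC label offset (ARMISD::PIC_ADD), and call through that
    // register.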
1460 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1461 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1462 ARMConstantPoolValue *CPV = 1463 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1464 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1465 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1466 Callee = DAG.getLoad(getPointerTy(), dl, 1467 DAG.getEntryNode(), CPAddr, 1468 MachinePointerInfo::getConstantPool(), 1469 false, false, 0); 1470 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1471 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1472 getPointerTy(), Callee, PICLabel); 1473 } else { 1474 // On ELF targets for PIC code, direct calls should go through the PLT 1475 unsigned OpFlags = 0; 1476 if (Subtarget->isTargetELF() && 1477 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1478 OpFlags = ARMII::MO_PLT; 1479 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1480 } 1481 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1482 isDirect = true; 1483 bool isStub = Subtarget->isTargetDarwin() && 1484 getTargetMachine().getRelocationModel() != Reloc::Static; 1485 isARMFunc = !Subtarget->isThumb() || isStub; 1486 // tBX takes a register source operand. 1487 const char *Sym = S->getSymbol(); 1488 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1489 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1490 ARMConstantPoolValue *CPV = 1491 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1492 ARMPCLabelIndex, 4); 1493 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1494 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1495 Callee = DAG.getLoad(getPointerTy(), dl, 1496 DAG.getEntryNode(), CPAddr, 1497 MachinePointerInfo::getConstantPool(), 1498 false, false, 0); 1499 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1500 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1501 getPointerTy(), Callee, PICLabel); 1502 } else { 1503 unsigned OpFlags = 0; 1504 // On ELF targets for PIC code, direct calls should go through the PLT 1505 if (Subtarget->isTargetELF() && 1506 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1507 OpFlags = ARMII::MO_PLT; 1508 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1509 } 1510 } 1511 1512 // FIXME: handle tail calls differently. 1513 unsigned CallOpc; 1514 if (Subtarget->isThumb()) { 1515 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1516 CallOpc = ARMISD::CALL_NOLINK; 1517 else 1518 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1519 } else { 1520 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1521 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1522 : ARMISD::CALL_NOLINK; 1523 } 1524 1525 std::vector<SDValue> Ops; 1526 Ops.push_back(Chain); 1527 Ops.push_back(Callee); 1528 1529 // Add argument registers to the end of the list so that they are known live 1530 // into the call. 1531 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1532 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1533 RegsToPass[i].second.getValueType())); 1534 1535 if (InFlag.getNode()) 1536 Ops.push_back(InFlag); 1537 1538 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1539 if (isTailCall) 1540 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1541 1542 // Returns a chain and a flag for retval copy to use. 
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");
  if ((!State->isFirstByValRegValid()) &&
      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
    State->setFirstByValReg(reg);
    // At a call site, a byval parameter that is split between
    // registers and memory needs its size truncated here. In a
    // function prologue, such byval parameters are reassembled in
    // memory, and are not truncated.
    if (State->getCallOrPrologue() == Call) {
      unsigned excess = 4 * (ARM::R4 - reg);
      assert(size >= excess && "expected larger existing stack allocation");
      size -= excess;
    }
  }
  // Confiscate any remaining parameter registers to preclude their
  // assignment to subsequent parameters.
  while (State->AllocateReg(GPRArgRegs, 4))
    ;
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same (relative) position of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const ARMInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // A byval argument is passed in as a pointer but it is now being
      // dereferenced, e.g.:
      //   define @foo(%struct.X* %A) {
      //     tail call @bar(%struct.X* byval %A)
      //   }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
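/// The checks below are deliberately conservative: vararg calls that pass
/// arguments, struct-return callers or callees, Thumb1 callers, calling
/// conventions that return values differently, and stack arguments that are
/// not already in the right slots all disqualify the call from being lowered
/// as a sibcall.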
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall-optimize vararg calls unless the call site passes no
  // arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // FIXME: Completely disable sibcalls for Thumb1 since Thumb1RegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
  // LR. This means if we need to reload LR, it takes an extra instruction,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used. Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed.
For now, do not do this if any 1714 // argument is passed on the stack. 1715 SmallVector<CCValAssign, 16> ArgLocs; 1716 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1717 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1718 CCInfo.AnalyzeCallOperands(Outs, 1719 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1720 if (CCInfo.getNextStackOffset()) { 1721 MachineFunction &MF = DAG.getMachineFunction(); 1722 1723 // Check if the arguments are already laid out in the right way as 1724 // the caller's fixed stack objects. 1725 MachineFrameInfo *MFI = MF.getFrameInfo(); 1726 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1727 const ARMInstrInfo *TII = 1728 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1729 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1730 i != e; 1731 ++i, ++realArgIdx) { 1732 CCValAssign &VA = ArgLocs[i]; 1733 EVT RegVT = VA.getLocVT(); 1734 SDValue Arg = OutVals[realArgIdx]; 1735 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1736 if (VA.getLocInfo() == CCValAssign::Indirect) 1737 return false; 1738 if (VA.needsCustom()) { 1739 // f64 and vector types are split into multiple registers or 1740 // register/stack-slot combinations. The types will not match 1741 // the registers; give up on memory f64 refs until we figure 1742 // out what to do about this. 1743 if (!VA.isRegLoc()) 1744 return false; 1745 if (!ArgLocs[++i].isRegLoc()) 1746 return false; 1747 if (RegVT == MVT::v2f64) { 1748 if (!ArgLocs[++i].isRegLoc()) 1749 return false; 1750 if (!ArgLocs[++i].isRegLoc()) 1751 return false; 1752 } 1753 } else if (!VA.isRegLoc()) { 1754 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1755 MFI, MRI, TII)) 1756 return false; 1757 } 1758 } 1759 } 1760 } 1761 1762 return true; 1763} 1764 1765SDValue 1766ARMTargetLowering::LowerReturn(SDValue Chain, 1767 CallingConv::ID CallConv, bool isVarArg, 1768 const SmallVectorImpl<ISD::OutputArg> &Outs, 1769 const SmallVectorImpl<SDValue> &OutVals, 1770 DebugLoc dl, SelectionDAG &DAG) const { 1771 1772 // CCValAssign - represent the assignment of the return value to a location. 1773 SmallVector<CCValAssign, 16> RVLocs; 1774 1775 // CCState - Info about the registers and stack slots. 1776 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1777 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1778 1779 // Analyze outgoing return values. 1780 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1781 isVarArg)); 1782 1783 // If this is the first return lowered for this function, add 1784 // the regs to the liveout set for the function. 1785 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1786 for (unsigned i = 0; i != RVLocs.size(); ++i) 1787 if (RVLocs[i].isRegLoc()) 1788 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1789 } 1790 1791 SDValue Flag; 1792 1793 // Copy the result values into the output registers. 1794 for (unsigned i = 0, realRVLocIdx = 0; 1795 i != RVLocs.size(); 1796 ++i, ++realRVLocIdx) { 1797 CCValAssign &VA = RVLocs[i]; 1798 assert(VA.isRegLoc() && "Can only return in registers!"); 1799 1800 SDValue Arg = OutVals[realRVLocIdx]; 1801 1802 switch (VA.getLocInfo()) { 1803 default: llvm_unreachable("Unknown loc info!"); 1804 case CCValAssign::Full: break; 1805 case CCValAssign::BCvt: 1806 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1807 break; 1808 } 1809 1810 if (VA.needsCustom()) { 1811 if (VA.getLocVT() == MVT::v2f64) { 1812 // Extract the first half and return it in two registers. 
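        // Each f64 half is moved to a pair of GPRs with VMOVRRD, so a v2f64
        // return value occupies up to four i32 return registers (typically
        // r0-r3 with the soft-float calling conventions).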
1813 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1814 DAG.getConstant(0, MVT::i32)); 1815 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1816 DAG.getVTList(MVT::i32, MVT::i32), Half); 1817 1818 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1819 Flag = Chain.getValue(1); 1820 VA = RVLocs[++i]; // skip ahead to next loc 1821 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1822 HalfGPRs.getValue(1), Flag); 1823 Flag = Chain.getValue(1); 1824 VA = RVLocs[++i]; // skip ahead to next loc 1825 1826 // Extract the 2nd half and fall through to handle it as an f64 value. 1827 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1828 DAG.getConstant(1, MVT::i32)); 1829 } 1830 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1831 // available. 1832 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1833 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1834 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1835 Flag = Chain.getValue(1); 1836 VA = RVLocs[++i]; // skip ahead to next loc 1837 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1838 Flag); 1839 } else 1840 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1841 1842 // Guarantee that all emitted copies are 1843 // stuck together, avoiding something bad. 1844 Flag = Chain.getValue(1); 1845 } 1846 1847 SDValue result; 1848 if (Flag.getNode()) 1849 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1850 else // Return Void 1851 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1852 1853 return result; 1854} 1855 1856bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1857 if (N->getNumValues() != 1) 1858 return false; 1859 if (!N->hasNUsesOfValue(1, 0)) 1860 return false; 1861 1862 unsigned NumCopies = 0; 1863 SDNode* Copies[2]; 1864 SDNode *Use = *N->use_begin(); 1865 if (Use->getOpcode() == ISD::CopyToReg) { 1866 Copies[NumCopies++] = Use; 1867 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1868 // f64 returned in a pair of GPRs. 1869 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1870 UI != UE; ++UI) { 1871 if (UI->getOpcode() != ISD::CopyToReg) 1872 return false; 1873 Copies[UI.getUse().getResNo()] = *UI; 1874 ++NumCopies; 1875 } 1876 } else if (Use->getOpcode() == ISD::BITCAST) { 1877 // f32 returned in a single GPR. 
1878 if (!Use->hasNUsesOfValue(1, 0)) 1879 return false; 1880 Use = *Use->use_begin(); 1881 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1882 return false; 1883 Copies[NumCopies++] = Use; 1884 } else { 1885 return false; 1886 } 1887 1888 if (NumCopies != 1 && NumCopies != 2) 1889 return false; 1890 1891 bool HasRet = false; 1892 for (unsigned i = 0; i < NumCopies; ++i) { 1893 SDNode *Copy = Copies[i]; 1894 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1895 UI != UE; ++UI) { 1896 if (UI->getOpcode() == ISD::CopyToReg) { 1897 SDNode *Use = *UI; 1898 if (Use == Copies[0] || Use == Copies[1]) 1899 continue; 1900 return false; 1901 } 1902 if (UI->getOpcode() != ARMISD::RET_FLAG) 1903 return false; 1904 HasRet = true; 1905 } 1906 } 1907 1908 return HasRet; 1909} 1910 1911bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1912 if (!EnableARMTailCalls) 1913 return false; 1914 1915 if (!CI->isTailCall()) 1916 return false; 1917 1918 return !Subtarget->isThumb1Only(); 1919} 1920 1921// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1922// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1923// one of the above mentioned nodes. It has to be wrapped because otherwise 1924// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1925// be used to form addressing mode. These wrapped nodes will be selected 1926// into MOVi. 1927static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1928 EVT PtrVT = Op.getValueType(); 1929 // FIXME there is no actual debug info here 1930 DebugLoc dl = Op.getDebugLoc(); 1931 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1932 SDValue Res; 1933 if (CP->isMachineConstantPoolEntry()) 1934 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1935 CP->getAlignment()); 1936 else 1937 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1938 CP->getAlignment()); 1939 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1940} 1941 1942unsigned ARMTargetLowering::getJumpTableEncoding() const { 1943 return MachineJumpTableInfo::EK_Inline; 1944} 1945 1946SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1947 SelectionDAG &DAG) const { 1948 MachineFunction &MF = DAG.getMachineFunction(); 1949 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1950 unsigned ARMPCLabelIndex = 0; 1951 DebugLoc DL = Op.getDebugLoc(); 1952 EVT PtrVT = getPointerTy(); 1953 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1954 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1955 SDValue CPAddr; 1956 if (RelocM == Reloc::Static) { 1957 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1958 } else { 1959 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1960 ARMPCLabelIndex = AFI->createPICLabelUId(); 1961 ARMConstantPoolValue *CPV = 1962 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 1963 ARMCP::CPBlockAddress, PCAdj); 1964 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1965 } 1966 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1967 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1968 MachinePointerInfo::getConstantPool(), 1969 false, false, 0); 1970 if (RelocM == Reloc::Static) 1971 return Result; 1972 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1973 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1974} 1975 1976// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1977SDValue 1978ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1979 SelectionDAG &DAG) const { 1980 DebugLoc dl = GA->getDebugLoc(); 1981 EVT PtrVT = getPointerTy(); 1982 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1983 MachineFunction &MF = DAG.getMachineFunction(); 1984 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1985 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1986 ARMConstantPoolValue *CPV = 1987 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 1988 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1989 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1990 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1991 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1992 MachinePointerInfo::getConstantPool(), 1993 false, false, 0); 1994 SDValue Chain = Argument.getValue(1); 1995 1996 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1997 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1998 1999 // call __tls_get_addr. 2000 ArgListTy Args; 2001 ArgListEntry Entry; 2002 Entry.Node = Argument; 2003 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2004 Args.push_back(Entry); 2005 // FIXME: is there useful debug info available here? 2006 std::pair<SDValue, SDValue> CallResult = 2007 LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()), 2008 false, false, false, false, 2009 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 2010 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2011 return CallResult.first; 2012} 2013 2014// Lower ISD::GlobalTLSAddress using the "initial exec" or 2015// "local exec" model. 2016SDValue 2017ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2018 SelectionDAG &DAG) const { 2019 const GlobalValue *GV = GA->getGlobal(); 2020 DebugLoc dl = GA->getDebugLoc(); 2021 SDValue Offset; 2022 SDValue Chain = DAG.getEntryNode(); 2023 EVT PtrVT = getPointerTy(); 2024 // Get the Thread Pointer 2025 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2026 2027 if (GV->isDeclaration()) { 2028 MachineFunction &MF = DAG.getMachineFunction(); 2029 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2030 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2031 // Initial exec model. 2032 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2033 ARMConstantPoolValue *CPV = 2034 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2035 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2036 true); 2037 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2038 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2039 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2040 MachinePointerInfo::getConstantPool(), 2041 false, false, 0); 2042 Chain = Offset.getValue(1); 2043 2044 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2045 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2046 2047 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2048 MachinePointerInfo::getConstantPool(), 2049 false, false, 0); 2050 } else { 2051 // local exec model 2052 ARMConstantPoolValue *CPV = 2053 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2054 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2055 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2056 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2057 MachinePointerInfo::getConstantPool(), 2058 false, false, 0); 2059 } 2060 2061 // The address of the thread local variable is the add of the thread 2062 // pointer with the offset of the variable. 2063 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2064} 2065 2066SDValue 2067ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2068 // TODO: implement the "local dynamic" model 2069 assert(Subtarget->isTargetELF() && 2070 "TLS not implemented for non-ELF targets"); 2071 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2072 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2073 // otherwise use the "Local Exec" TLS Model 2074 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2075 return LowerToTLSGeneralDynamicModel(GA, DAG); 2076 else 2077 return LowerToTLSExecModels(GA, DAG); 2078} 2079 2080SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2081 SelectionDAG &DAG) const { 2082 EVT PtrVT = getPointerTy(); 2083 DebugLoc dl = Op.getDebugLoc(); 2084 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2085 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2086 if (RelocM == Reloc::PIC_) { 2087 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2088 ARMConstantPoolValue *CPV = 2089 ARMConstantPoolConstant::Create(GV, 2090 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2091 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2092 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2093 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2094 CPAddr, 2095 MachinePointerInfo::getConstantPool(), 2096 false, false, 0); 2097 SDValue Chain = Result.getValue(1); 2098 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2099 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2100 if (!UseGOTOFF) 2101 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2102 MachinePointerInfo::getGOT(), false, false, 0); 2103 return Result; 2104 } 2105 2106 // If we have T2 ops, we can materialize the address directly via movt/movw 2107 // pair. This is always cheaper. 2108 if (Subtarget->useMovt()) { 2109 ++NumMovwMovt; 2110 // FIXME: Once remat is capable of dealing with instructions with register 2111 // operands, expand this into two nodes. 
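    // The wrapped TargetGlobalAddress is later selected into a movw/movt
    // pair that builds the full 32-bit address in a register, schematically:
    //   movw rN, :lower16:sym
    //   movt rN, :upper16:sym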
2112 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2113 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2114 } else { 2115 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2116 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2117 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2118 MachinePointerInfo::getConstantPool(), 2119 false, false, 0); 2120 } 2121} 2122 2123SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2124 SelectionDAG &DAG) const { 2125 EVT PtrVT = getPointerTy(); 2126 DebugLoc dl = Op.getDebugLoc(); 2127 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2128 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2129 MachineFunction &MF = DAG.getMachineFunction(); 2130 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2131 2132 // FIXME: Enable this for static codegen when tool issues are fixed. 2133 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2134 ++NumMovwMovt; 2135 // FIXME: Once remat is capable of dealing with instructions with register 2136 // operands, expand this into two nodes. 2137 if (RelocM == Reloc::Static) 2138 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2139 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2140 2141 unsigned Wrapper = (RelocM == Reloc::PIC_) 2142 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2143 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2144 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2145 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2146 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2147 MachinePointerInfo::getGOT(), false, false, 0); 2148 return Result; 2149 } 2150 2151 unsigned ARMPCLabelIndex = 0; 2152 SDValue CPAddr; 2153 if (RelocM == Reloc::Static) { 2154 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2155 } else { 2156 ARMPCLabelIndex = AFI->createPICLabelUId(); 2157 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2158 ARMConstantPoolValue *CPV = 2159 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2160 PCAdj); 2161 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2162 } 2163 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2164 2165 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2166 MachinePointerInfo::getConstantPool(), 2167 false, false, 0); 2168 SDValue Chain = Result.getValue(1); 2169 2170 if (RelocM == Reloc::PIC_) { 2171 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2172 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2173 } 2174 2175 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2176 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2177 false, false, 0); 2178 2179 return Result; 2180} 2181 2182SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2183 SelectionDAG &DAG) const { 2184 assert(Subtarget->isTargetELF() && 2185 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2186 MachineFunction &MF = DAG.getMachineFunction(); 2187 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2188 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2189 EVT PtrVT = getPointerTy(); 2190 DebugLoc dl = Op.getDebugLoc(); 2191 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2192 ARMConstantPoolValue *CPV = 2193 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2194 ARMPCLabelIndex, PCAdj); 2195 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2196 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2197 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2198 MachinePointerInfo::getConstantPool(), 2199 false, false, 0); 2200 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2201 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2202} 2203 2204SDValue 2205ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2206 const { 2207 DebugLoc dl = Op.getDebugLoc(); 2208 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2209 Op.getOperand(0), Op.getOperand(1)); 2210} 2211 2212SDValue 2213ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2214 DebugLoc dl = Op.getDebugLoc(); 2215 SDValue Val = DAG.getConstant(0, MVT::i32); 2216 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2217 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2218 Op.getOperand(1), Val); 2219} 2220 2221SDValue 2222ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2223 DebugLoc dl = Op.getDebugLoc(); 2224 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2225 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2226} 2227 2228SDValue 2229ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2230 const ARMSubtarget *Subtarget) const { 2231 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2232 DebugLoc dl = Op.getDebugLoc(); 2233 switch (IntNo) { 2234 default: return SDValue(); // Don't custom lower most intrinsics. 2235 case Intrinsic::arm_thread_pointer: { 2236 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2237 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2238 } 2239 case Intrinsic::eh_sjlj_lsda: { 2240 MachineFunction &MF = DAG.getMachineFunction(); 2241 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2242 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2243 EVT PtrVT = getPointerTy(); 2244 DebugLoc dl = Op.getDebugLoc(); 2245 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2246 SDValue CPAddr; 2247 unsigned PCAdj = (RelocM != Reloc::PIC_) 2248 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2249 ARMConstantPoolValue *CPV = 2250 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2251 ARMCP::CPLSDA, PCAdj); 2252 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2253 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2254 SDValue Result = 2255 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2256 MachinePointerInfo::getConstantPool(), 2257 false, false, 0); 2258 2259 if (RelocM == Reloc::PIC_) { 2260 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2261 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2262 } 2263 return Result; 2264 } 2265 case Intrinsic::arm_neon_vmulls: 2266 case Intrinsic::arm_neon_vmullu: { 2267 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2268 ? 
ARMISD::VMULLs : ARMISD::VMULLu; 2269 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2270 Op.getOperand(1), Op.getOperand(2)); 2271 } 2272 } 2273} 2274 2275static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2276 const ARMSubtarget *Subtarget) { 2277 DebugLoc dl = Op.getDebugLoc(); 2278 if (!Subtarget->hasDataBarrier()) { 2279 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2280 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2281 // here. 2282 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2283 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2284 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2285 DAG.getConstant(0, MVT::i32)); 2286 } 2287 2288 SDValue Op5 = Op.getOperand(5); 2289 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2290 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2291 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2292 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2293 2294 ARM_MB::MemBOpt DMBOpt; 2295 if (isDeviceBarrier) 2296 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2297 else 2298 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2299 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2300 DAG.getConstant(DMBOpt, MVT::i32)); 2301} 2302 2303 2304static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2305 const ARMSubtarget *Subtarget) { 2306 // FIXME: handle "fence singlethread" more efficiently. 2307 DebugLoc dl = Op.getDebugLoc(); 2308 if (!Subtarget->hasDataBarrier()) { 2309 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2310 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2311 // here. 2312 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2313 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2314 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2315 DAG.getConstant(0, MVT::i32)); 2316 } 2317 2318 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2319 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2320} 2321 2322static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2323 const ARMSubtarget *Subtarget) { 2324 // ARM pre v5TE and Thumb1 does not have preload instructions. 2325 if (!(Subtarget->isThumb2() || 2326 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2327 // Just preserve the chain. 2328 return Op.getOperand(0); 2329 2330 DebugLoc dl = Op.getDebugLoc(); 2331 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2332 if (!isRead && 2333 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2334 // ARMv7 with MP extension has PLDW. 2335 return Op.getOperand(0); 2336 2337 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2338 if (Subtarget->isThumb()) { 2339 // Invert the bits. 2340 isRead = ~isRead & 1; 2341 isData = ~isData & 1; 2342 } 2343 2344 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2345 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2346 DAG.getConstant(isData, MVT::i32)); 2347} 2348 2349static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2350 MachineFunction &MF = DAG.getMachineFunction(); 2351 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2352 2353 // vastart just stores the address of the VarArgsFrameIndex slot into the 2354 // memory location argument. 
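  // On ARM the va_list object is effectively a single pointer, so no further
  // initialization is needed: the store below writes the frame-index address
  // into the va_list passed as operand 1.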
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create a load node to retrieve the second half from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
                                  unsigned &VARegSize, unsigned &VARegSaveSize)
  const {
  unsigned NumGPRs;
  if (CCInfo.isFirstByValRegValid())
    NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
  else {
    unsigned int firstUnalloced;
    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
                                                sizeof(GPRArgRegs) /
                                                sizeof(GPRArgRegs[0]));
    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
  }

  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
  VARegSize = NumGPRs * 4;
  VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
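// For example, in a variadic function whose fixed arguments consume r0 and
// r1, the values of r2 and r3 would be stored into the register-save area
// created here, and va_start then points at that area (illustrative only;
// the exact registers depend on the fixed arguments and calling convention).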
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        DebugLoc dl, SDValue &Chain,
                                        unsigned ArgOffset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned firstRegToSaveIndex;
  if (CCInfo.isFirstByValRegValid())
    firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0;
  else {
    firstRegToSaveIndex = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
  }

  unsigned VARegSize, VARegSaveSize;
  computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize);
  if (VARegSaveSize) {
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_arg.
    AFI->setVarArgsRegSaveSize(VARegSaveSize);
    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize,
                                                     ArgOffset + VARegSaveSize
                                                       - VARegSize,
                                                     false));
    SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
                                    getPointerTy());

    SmallVector<SDValue, 4> MemOps;
    for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
      TargetRegisterClass *RC;
      if (AFI->isThumb1OnlyFunction())
        RC = ARM::tGPRRegisterClass;
      else
        RC = ARM::GPRRegisterClass;

      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      SDValue Store =
        DAG.getStore(Val.getValue(1), dl, Val, FIN,
                     MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()),
                     false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                        DAG.getConstant(4, getPointerTy()));
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
  } else
    // This will point to the next argument passed via stack.
    AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  int lastInsIndex = -1;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
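        // An incoming f64 may arrive as two GPRs (rebuilt with VMOVDRR in
        // GetF64FormalArgument) or as one GPR plus a stack word; a v2f64
        // argument is two such f64 halves, and the second half may also live
        // entirely on the stack.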
2515 if (VA.getLocVT() == MVT::v2f64) { 2516 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2517 Chain, DAG, dl); 2518 VA = ArgLocs[++i]; // skip ahead to next loc 2519 SDValue ArgValue2; 2520 if (VA.isMemLoc()) { 2521 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2522 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2523 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2524 MachinePointerInfo::getFixedStack(FI), 2525 false, false, 0); 2526 } else { 2527 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2528 Chain, DAG, dl); 2529 } 2530 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2531 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2532 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2533 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2534 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2535 } else 2536 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2537 2538 } else { 2539 TargetRegisterClass *RC; 2540 2541 if (RegVT == MVT::f32) 2542 RC = ARM::SPRRegisterClass; 2543 else if (RegVT == MVT::f64) 2544 RC = ARM::DPRRegisterClass; 2545 else if (RegVT == MVT::v2f64) 2546 RC = ARM::QPRRegisterClass; 2547 else if (RegVT == MVT::i32) 2548 RC = (AFI->isThumb1OnlyFunction() ? 2549 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2550 else 2551 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2552 2553 // Transform the arguments in physical registers into virtual ones. 2554 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2555 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2556 } 2557 2558 // If this is an 8 or 16-bit value, it is really passed promoted 2559 // to 32 bits. Insert an assert[sz]ext to capture this, then 2560 // truncate to the right size. 2561 switch (VA.getLocInfo()) { 2562 default: llvm_unreachable("Unknown loc info!"); 2563 case CCValAssign::Full: break; 2564 case CCValAssign::BCvt: 2565 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2566 break; 2567 case CCValAssign::SExt: 2568 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2569 DAG.getValueType(VA.getValVT())); 2570 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2571 break; 2572 case CCValAssign::ZExt: 2573 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2574 DAG.getValueType(VA.getValVT())); 2575 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2576 break; 2577 } 2578 2579 InVals.push_back(ArgValue); 2580 2581 } else { // VA.isRegLoc() 2582 2583 // sanity check 2584 assert(VA.isMemLoc()); 2585 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2586 2587 int index = ArgLocs[i].getValNo(); 2588 2589 // Some Ins[] entries become multiple ArgLoc[] entries. 2590 // Process them only once. 2591 if (index != lastInsIndex) 2592 { 2593 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2594 // FIXME: For now, all byval parameter objects are marked mutable. 2595 // This can be changed with more analysis. 2596 // In case of tail call optimization mark all arguments mutable. 2597 // Since they could be overwritten by lowering of arguments in case of 2598 // a tail call. 2599 if (Flags.isByVal()) { 2600 unsigned VARegSize, VARegSaveSize; 2601 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2602 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2603 unsigned Bytes = Flags.getByValSize() - VARegSize; 2604 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
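          // The fixed object created here covers just the part of the byval
          // argument that the caller placed on the stack; any leading words
          // passed in registers were saved next to it by VarArgStyleRegisters
          // above, so the frame index can be handed out as the argument's
          // address.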
2605 int FI = MFI->CreateFixedObject(Bytes, 2606 VA.getLocMemOffset(), false); 2607 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2608 } else { 2609 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2610 VA.getLocMemOffset(), true); 2611 2612 // Create load nodes to retrieve arguments from the stack. 2613 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2614 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2615 MachinePointerInfo::getFixedStack(FI), 2616 false, false, 0)); 2617 } 2618 lastInsIndex = index; 2619 } 2620 } 2621 } 2622 2623 // varargs 2624 if (isVarArg) 2625 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2626 2627 return Chain; 2628} 2629 2630/// isFloatingPointZero - Return true if this is +0.0. 2631static bool isFloatingPointZero(SDValue Op) { 2632 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2633 return CFP->getValueAPF().isPosZero(); 2634 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2635 // Maybe this has already been legalized into the constant pool? 2636 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2637 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2638 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2639 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2640 return CFP->getValueAPF().isPosZero(); 2641 } 2642 } 2643 return false; 2644} 2645 2646/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2647/// the given operands. 2648SDValue 2649ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2650 SDValue &ARMcc, SelectionDAG &DAG, 2651 DebugLoc dl) const { 2652 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2653 unsigned C = RHSC->getZExtValue(); 2654 if (!isLegalICmpImmediate(C)) { 2655 // Constant does not fit, try adjusting it by one? 2656 switch (CC) { 2657 default: break; 2658 case ISD::SETLT: 2659 case ISD::SETGE: 2660 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2661 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2662 RHS = DAG.getConstant(C-1, MVT::i32); 2663 } 2664 break; 2665 case ISD::SETULT: 2666 case ISD::SETUGE: 2667 if (C != 0 && isLegalICmpImmediate(C-1)) { 2668 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2669 RHS = DAG.getConstant(C-1, MVT::i32); 2670 } 2671 break; 2672 case ISD::SETLE: 2673 case ISD::SETGT: 2674 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2675 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2676 RHS = DAG.getConstant(C+1, MVT::i32); 2677 } 2678 break; 2679 case ISD::SETULE: 2680 case ISD::SETUGT: 2681 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2682 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2683 RHS = DAG.getConstant(C+1, MVT::i32); 2684 } 2685 break; 2686 } 2687 } 2688 } 2689 2690 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2691 ARMISD::NodeType CompareType; 2692 switch (CondCode) { 2693 default: 2694 CompareType = ARMISD::CMP; 2695 break; 2696 case ARMCC::EQ: 2697 case ARMCC::NE: 2698 // Uses only Z Flag 2699 CompareType = ARMISD::CMPZ; 2700 break; 2701 } 2702 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2703 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2704} 2705 2706/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
2707SDValue 2708ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2709 DebugLoc dl) const { 2710 SDValue Cmp; 2711 if (!isFloatingPointZero(RHS)) 2712 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2713 else 2714 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2715 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2716} 2717 2718/// duplicateCmp - Glue values can have only one use, so this function 2719/// duplicates a comparison node. 2720SDValue 2721ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2722 unsigned Opc = Cmp.getOpcode(); 2723 DebugLoc DL = Cmp.getDebugLoc(); 2724 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2725 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2726 2727 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2728 Cmp = Cmp.getOperand(0); 2729 Opc = Cmp.getOpcode(); 2730 if (Opc == ARMISD::CMPFP) 2731 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2732 else { 2733 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2734 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2735 } 2736 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2737} 2738 2739SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2740 SDValue Cond = Op.getOperand(0); 2741 SDValue SelectTrue = Op.getOperand(1); 2742 SDValue SelectFalse = Op.getOperand(2); 2743 DebugLoc dl = Op.getDebugLoc(); 2744 2745 // Convert: 2746 // 2747 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2748 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2749 // 2750 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2751 const ConstantSDNode *CMOVTrue = 2752 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2753 const ConstantSDNode *CMOVFalse = 2754 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2755 2756 if (CMOVTrue && CMOVFalse) { 2757 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2758 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2759 2760 SDValue True; 2761 SDValue False; 2762 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2763 True = SelectTrue; 2764 False = SelectFalse; 2765 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2766 True = SelectFalse; 2767 False = SelectTrue; 2768 } 2769 2770 if (True.getNode() && False.getNode()) { 2771 EVT VT = Op.getValueType(); 2772 SDValue ARMcc = Cond.getOperand(2); 2773 SDValue CCR = Cond.getOperand(3); 2774 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2775 assert(True.getValueType() == VT); 2776 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2777 } 2778 } 2779 } 2780 2781 return DAG.getSelectCC(dl, Cond, 2782 DAG.getConstant(0, Cond.getValueType()), 2783 SelectTrue, SelectFalse, ISD::SETNE); 2784} 2785 2786SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2787 EVT VT = Op.getValueType(); 2788 SDValue LHS = Op.getOperand(0); 2789 SDValue RHS = Op.getOperand(1); 2790 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2791 SDValue TrueVal = Op.getOperand(2); 2792 SDValue FalseVal = Op.getOperand(3); 2793 DebugLoc dl = Op.getDebugLoc(); 2794 2795 if (LHS.getValueType() == MVT::i32) { 2796 SDValue ARMcc; 2797 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2798 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2799 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2800 } 2801 2802 ARMCC::CondCodes CondCode, CondCode2; 2803 FPCCToARMCC(CC, 
CondCode, CondCode2); 2804 2805 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2806 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2807 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2808 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2809 ARMcc, CCR, Cmp); 2810 if (CondCode2 != ARMCC::AL) { 2811 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2812 // FIXME: Needs another CMP because flag can have but one use. 2813 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2814 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2815 Result, TrueVal, ARMcc2, CCR, Cmp2); 2816 } 2817 return Result; 2818} 2819 2820/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2821/// to morph to an integer compare sequence. 2822static bool canChangeToInt(SDValue Op, bool &SeenZero, 2823 const ARMSubtarget *Subtarget) { 2824 SDNode *N = Op.getNode(); 2825 if (!N->hasOneUse()) 2826 // Otherwise it requires moving the value from fp to integer registers. 2827 return false; 2828 if (!N->getNumValues()) 2829 return false; 2830 EVT VT = Op.getValueType(); 2831 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2832 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2833 // vmrs are very slow, e.g. cortex-a8. 2834 return false; 2835 2836 if (isFloatingPointZero(Op)) { 2837 SeenZero = true; 2838 return true; 2839 } 2840 return ISD::isNormalLoad(N); 2841} 2842 2843static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2844 if (isFloatingPointZero(Op)) 2845 return DAG.getConstant(0, MVT::i32); 2846 2847 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2848 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2849 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2850 Ld->isVolatile(), Ld->isNonTemporal(), 2851 Ld->getAlignment()); 2852 2853 llvm_unreachable("Unknown VFP cmp argument!"); 2854} 2855 2856static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2857 SDValue &RetVal1, SDValue &RetVal2) { 2858 if (isFloatingPointZero(Op)) { 2859 RetVal1 = DAG.getConstant(0, MVT::i32); 2860 RetVal2 = DAG.getConstant(0, MVT::i32); 2861 return; 2862 } 2863 2864 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2865 SDValue Ptr = Ld->getBasePtr(); 2866 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2867 Ld->getChain(), Ptr, 2868 Ld->getPointerInfo(), 2869 Ld->isVolatile(), Ld->isNonTemporal(), 2870 Ld->getAlignment()); 2871 2872 EVT PtrType = Ptr.getValueType(); 2873 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2874 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2875 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2876 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2877 Ld->getChain(), NewPtr, 2878 Ld->getPointerInfo().getWithOffset(4), 2879 Ld->isVolatile(), Ld->isNonTemporal(), 2880 NewAlign); 2881 return; 2882 } 2883 2884 llvm_unreachable("Unknown VFP cmp argument!"); 2885} 2886 2887/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2888/// f32 and even f64 comparisons to integer ones. 
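/// The caller only invokes this for EQ/NE-style condition codes under unsafe
/// FP math; here the condition is normalized to SETEQ / SETNE, f32 operands
/// are re-loaded (or rematerialized as integer 0) as i32 and compared with a
/// plain integer CMP, and f64 operands are split into two i32 halves and
/// compared with an ARMISD::BCC_i64 node. An empty SDValue is returned when
/// the transformation does not apply.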
2889SDValue 2890ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2891 SDValue Chain = Op.getOperand(0); 2892 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2893 SDValue LHS = Op.getOperand(2); 2894 SDValue RHS = Op.getOperand(3); 2895 SDValue Dest = Op.getOperand(4); 2896 DebugLoc dl = Op.getDebugLoc(); 2897 2898 bool SeenZero = false; 2899 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2900 canChangeToInt(RHS, SeenZero, Subtarget) && 2901 // If one of the operand is zero, it's safe to ignore the NaN case since 2902 // we only care about equality comparisons. 2903 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2904 // If unsafe fp math optimization is enabled and there are no other uses of 2905 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2906 // to an integer comparison. 2907 if (CC == ISD::SETOEQ) 2908 CC = ISD::SETEQ; 2909 else if (CC == ISD::SETUNE) 2910 CC = ISD::SETNE; 2911 2912 SDValue ARMcc; 2913 if (LHS.getValueType() == MVT::f32) { 2914 LHS = bitcastf32Toi32(LHS, DAG); 2915 RHS = bitcastf32Toi32(RHS, DAG); 2916 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2917 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2918 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2919 Chain, Dest, ARMcc, CCR, Cmp); 2920 } 2921 2922 SDValue LHS1, LHS2; 2923 SDValue RHS1, RHS2; 2924 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2925 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2926 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2927 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2928 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2929 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2930 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2931 } 2932 2933 return SDValue(); 2934} 2935 2936SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2937 SDValue Chain = Op.getOperand(0); 2938 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2939 SDValue LHS = Op.getOperand(2); 2940 SDValue RHS = Op.getOperand(3); 2941 SDValue Dest = Op.getOperand(4); 2942 DebugLoc dl = Op.getDebugLoc(); 2943 2944 if (LHS.getValueType() == MVT::i32) { 2945 SDValue ARMcc; 2946 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2947 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2948 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2949 Chain, Dest, ARMcc, CCR, Cmp); 2950 } 2951 2952 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2953 2954 if (UnsafeFPMath && 2955 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2956 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2957 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2958 if (Result.getNode()) 2959 return Result; 2960 } 2961 2962 ARMCC::CondCodes CondCode, CondCode2; 2963 FPCCToARMCC(CC, CondCode, CondCode2); 2964 2965 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2966 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2967 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2968 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2969 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2970 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2971 if (CondCode2 != ARMCC::AL) { 2972 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2973 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2974 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2975 } 2976 return Res; 2977} 2978 2979SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2980 SDValue 
Chain = Op.getOperand(0); 2981 SDValue Table = Op.getOperand(1); 2982 SDValue Index = Op.getOperand(2); 2983 DebugLoc dl = Op.getDebugLoc(); 2984 2985 EVT PTy = getPointerTy(); 2986 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2987 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2988 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2989 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2990 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2991 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2992 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2993 if (Subtarget->isThumb2()) { 2994 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2995 // which does another jump to the destination. This also makes it easier 2996 // to translate it to TBB / TBH later. 2997 // FIXME: This might not work if the function is extremely large. 2998 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2999 Addr, Op.getOperand(2), JTI, UId); 3000 } 3001 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3002 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3003 MachinePointerInfo::getJumpTable(), 3004 false, false, 0); 3005 Chain = Addr.getValue(1); 3006 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3007 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3008 } else { 3009 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3010 MachinePointerInfo::getJumpTable(), false, false, 0); 3011 Chain = Addr.getValue(1); 3012 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3013 } 3014} 3015 3016static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3017 DebugLoc dl = Op.getDebugLoc(); 3018 unsigned Opc; 3019 3020 switch (Op.getOpcode()) { 3021 default: 3022 assert(0 && "Invalid opcode!"); 3023 case ISD::FP_TO_SINT: 3024 Opc = ARMISD::FTOSI; 3025 break; 3026 case ISD::FP_TO_UINT: 3027 Opc = ARMISD::FTOUI; 3028 break; 3029 } 3030 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3031 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3032} 3033 3034static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3035 EVT VT = Op.getValueType(); 3036 DebugLoc dl = Op.getDebugLoc(); 3037 3038 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3039 "Invalid type for custom lowering!"); 3040 if (VT != MVT::v4f32) 3041 return DAG.UnrollVectorOp(Op.getNode()); 3042 3043 unsigned CastOpc; 3044 unsigned Opc; 3045 switch (Op.getOpcode()) { 3046 default: 3047 assert(0 && "Invalid opcode!"); 3048 case ISD::SINT_TO_FP: 3049 CastOpc = ISD::SIGN_EXTEND; 3050 Opc = ISD::SINT_TO_FP; 3051 break; 3052 case ISD::UINT_TO_FP: 3053 CastOpc = ISD::ZERO_EXTEND; 3054 Opc = ISD::UINT_TO_FP; 3055 break; 3056 } 3057 3058 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3059 return DAG.getNode(Opc, dl, VT, Op); 3060} 3061 3062static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3063 EVT VT = Op.getValueType(); 3064 if (VT.isVector()) 3065 return LowerVectorINT_TO_FP(Op, DAG); 3066 3067 DebugLoc dl = Op.getDebugLoc(); 3068 unsigned Opc; 3069 3070 switch (Op.getOpcode()) { 3071 default: 3072 assert(0 && "Invalid opcode!"); 3073 case ISD::SINT_TO_FP: 3074 Opc = ARMISD::SITOF; 3075 break; 3076 case ISD::UINT_TO_FP: 3077 Opc = ARMISD::UITOF; 3078 break; 3079 } 3080 3081 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3082 return DAG.getNode(Opc, dl, VT, Op); 3083} 3084 3085SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3086 // Implement fcopysign with a fabs and a conditional fneg. 3087 SDValue Tmp0 = Op.getOperand(0); 3088 SDValue Tmp1 = Op.getOperand(1); 3089 DebugLoc dl = Op.getDebugLoc(); 3090 EVT VT = Op.getValueType(); 3091 EVT SrcVT = Tmp1.getValueType(); 3092 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3093 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3094 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3095 3096 if (UseNEON) { 3097 // Use VBSL to copy the sign bit. 3098 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3099 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3100 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3101 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3102 if (VT == MVT::f64) 3103 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3104 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3105 DAG.getConstant(32, MVT::i32)); 3106 else /*if (VT == MVT::f32)*/ 3107 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3108 if (SrcVT == MVT::f32) { 3109 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3110 if (VT == MVT::f64) 3111 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3112 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3113 DAG.getConstant(32, MVT::i32)); 3114 } else if (VT == MVT::f32) 3115 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3116 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3117 DAG.getConstant(32, MVT::i32)); 3118 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3119 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3120 3121 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3122 MVT::i32); 3123 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3124 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3125 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3126 3127 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3128 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3129 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3130 if (VT == MVT::f32) { 3131 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3132 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3133 DAG.getConstant(0, MVT::i32)); 3134 } else { 3135 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3136 } 3137 3138 return Res; 3139 } 3140 3141 // Bitcast operand 1 to i32. 3142 if (SrcVT == MVT::f64) 3143 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3144 &Tmp1, 1).getValue(1); 3145 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3146 3147 // Or in the signbit with integer operations. 3148 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3149 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3150 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3151 if (VT == MVT::f32) { 3152 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3153 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3154 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3155 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3156 } 3157 3158 // f64: Or the high part with signbit and then combine two parts. 
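  // Sketch of the f64 path below, assuming the usual IEEE-754 layout of a
  // double in a D register: the sign is bit 63, i.e. bit 31 of the high word
  // returned by VMOVRRD. The high word of Tmp0 is therefore cleared of its
  // sign bit (AND with 0x7fffffff), ORed with the sign bit already extracted
  // from Tmp1 above (AND with 0x80000000), and the two words are recombined
  // into an f64 with VMOVDRR.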
3159 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3160 &Tmp0, 1); 3161 SDValue Lo = Tmp0.getValue(0); 3162 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3163 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3164 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3165} 3166 3167SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3168 MachineFunction &MF = DAG.getMachineFunction(); 3169 MachineFrameInfo *MFI = MF.getFrameInfo(); 3170 MFI->setReturnAddressIsTaken(true); 3171 3172 EVT VT = Op.getValueType(); 3173 DebugLoc dl = Op.getDebugLoc(); 3174 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3175 if (Depth) { 3176 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3177 SDValue Offset = DAG.getConstant(4, MVT::i32); 3178 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3179 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3180 MachinePointerInfo(), false, false, 0); 3181 } 3182 3183 // Return LR, which contains the return address. Mark it an implicit live-in. 3184 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3185 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3186} 3187 3188SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3189 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3190 MFI->setFrameAddressIsTaken(true); 3191 3192 EVT VT = Op.getValueType(); 3193 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3194 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3195 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3196 ? ARM::R7 : ARM::R11; 3197 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3198 while (Depth--) 3199 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3200 MachinePointerInfo(), 3201 false, false, 0); 3202 return FrameAddr; 3203} 3204 3205/// ExpandBITCAST - If the target supports VFP, this function is called to 3206/// expand a bit convert where either the source or destination type is i64 to 3207/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3208/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3209/// vectors), since the legalizer won't know what to do with that. 3210static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3211 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3212 DebugLoc dl = N->getDebugLoc(); 3213 SDValue Op = N->getOperand(0); 3214 3215 // This function is only supposed to be called for i64 types, either as the 3216 // source or destination of the bit convert. 3217 EVT SrcVT = Op.getValueType(); 3218 EVT DstVT = N->getValueType(0); 3219 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3220 "ExpandBITCAST called for non-i64 type"); 3221 3222 // Turn i64->f64 into VMOVDRR. 3223 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3224 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3225 DAG.getConstant(0, MVT::i32)); 3226 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3227 DAG.getConstant(1, MVT::i32)); 3228 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3229 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3230 } 3231 3232 // Turn f64->i64 into VMOVRRD. 3233 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3234 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3235 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3236 // Merge the pieces into a single i64 value. 
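  // VMOVRRD yields the low word as result 0 and the high word as result 1, so
  // BUILD_PAIR(lo, hi) reassembles them into the i64 result in the right
  // order.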
3237 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3238 } 3239 3240 return SDValue(); 3241} 3242 3243/// getZeroVector - Returns a vector of specified type with all zero elements. 3244/// Zero vectors are used to represent vector negation and in those cases 3245/// will be implemented with the NEON VNEG instruction. However, VNEG does 3246/// not support i64 elements, so sometimes the zero vectors will need to be 3247/// explicitly constructed. Regardless, use a canonical VMOV to create the 3248/// zero vector. 3249static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3250 assert(VT.isVector() && "Expected a vector type"); 3251 // The canonical modified immediate encoding of a zero vector is....0! 3252 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3253 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3254 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3255 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3256} 3257 3258/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3259/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3260SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3261 SelectionDAG &DAG) const { 3262 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3263 EVT VT = Op.getValueType(); 3264 unsigned VTBits = VT.getSizeInBits(); 3265 DebugLoc dl = Op.getDebugLoc(); 3266 SDValue ShOpLo = Op.getOperand(0); 3267 SDValue ShOpHi = Op.getOperand(1); 3268 SDValue ShAmt = Op.getOperand(2); 3269 SDValue ARMcc; 3270 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3271 3272 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3273 3274 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3275 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3276 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3277 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3278 DAG.getConstant(VTBits, MVT::i32)); 3279 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3280 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3281 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3282 3283 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3284 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3285 ARMcc, DAG, dl); 3286 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3287 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3288 CCR, Cmp); 3289 3290 SDValue Ops[2] = { Lo, Hi }; 3291 return DAG.getMergeValues(Ops, 2, dl); 3292} 3293 3294/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3295/// i32 values and take a 2 x i32 value to shift plus a shift amount. 
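/// Conceptually, with VTBits == 32 for i32 parts:
///   Lo = ShOpLo << ShAmt
///   Hi = ShAmt >= 32 ? ShOpLo << (ShAmt - 32)
///                    : (ShOpHi << ShAmt) | (ShOpLo >> (32 - ShAmt))
/// The ShAmt >= 32 case is selected with a CMOV on the result of comparing
/// (ShAmt - 32) against zero, so no branch is needed.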
3296SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3297 SelectionDAG &DAG) const { 3298 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3299 EVT VT = Op.getValueType(); 3300 unsigned VTBits = VT.getSizeInBits(); 3301 DebugLoc dl = Op.getDebugLoc(); 3302 SDValue ShOpLo = Op.getOperand(0); 3303 SDValue ShOpHi = Op.getOperand(1); 3304 SDValue ShAmt = Op.getOperand(2); 3305 SDValue ARMcc; 3306 3307 assert(Op.getOpcode() == ISD::SHL_PARTS); 3308 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3309 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3310 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3311 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3312 DAG.getConstant(VTBits, MVT::i32)); 3313 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3314 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3315 3316 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3317 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3318 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3319 ARMcc, DAG, dl); 3320 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3321 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3322 CCR, Cmp); 3323 3324 SDValue Ops[2] = { Lo, Hi }; 3325 return DAG.getMergeValues(Ops, 2, dl); 3326} 3327 3328SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3329 SelectionDAG &DAG) const { 3330 // The rounding mode is in bits 23:22 of the FPSCR. 3331 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3332 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3333 // so that the shift + and get folded into a bitfield extract. 3334 DebugLoc dl = Op.getDebugLoc(); 3335 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3336 DAG.getConstant(Intrinsic::arm_get_fpscr, 3337 MVT::i32)); 3338 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3339 DAG.getConstant(1U << 22, MVT::i32)); 3340 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3341 DAG.getConstant(22, MVT::i32)); 3342 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3343 DAG.getConstant(3, MVT::i32)); 3344} 3345 3346static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3347 const ARMSubtarget *ST) { 3348 EVT VT = N->getValueType(0); 3349 DebugLoc dl = N->getDebugLoc(); 3350 3351 if (!ST->hasV6T2Ops()) 3352 return SDValue(); 3353 3354 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3355 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3356} 3357 3358static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3359 const ARMSubtarget *ST) { 3360 EVT VT = N->getValueType(0); 3361 DebugLoc dl = N->getDebugLoc(); 3362 3363 if (!VT.isVector()) 3364 return SDValue(); 3365 3366 // Lower vector shifts on NEON to use VSHL. 3367 assert(ST->hasNEON() && "unexpected vector shift"); 3368 3369 // Left shifts translate directly to the vshiftu intrinsic. 3370 if (N->getOpcode() == ISD::SHL) 3371 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3372 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3373 N->getOperand(0), N->getOperand(1)); 3374 3375 assert((N->getOpcode() == ISD::SRA || 3376 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3377 3378 // NEON uses the same intrinsics for both left and right shifts. For 3379 // right shifts, the shift amounts are negative, so negate the vector of 3380 // shift amounts. 
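  // For example (illustrative only): an SRL of a v4i32 by <3,3,3,3> becomes a
  // call to the vshiftu intrinsic with a shift-amount vector of <-3,-3,-3,-3>,
  // which NEON's variable shift interprets as a right shift by 3 in each lane.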
3381 EVT ShiftVT = N->getOperand(1).getValueType(); 3382 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3383 getZeroVector(ShiftVT, DAG, dl), 3384 N->getOperand(1)); 3385 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3386 Intrinsic::arm_neon_vshifts : 3387 Intrinsic::arm_neon_vshiftu); 3388 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3389 DAG.getConstant(vshiftInt, MVT::i32), 3390 N->getOperand(0), NegatedCount); 3391} 3392 3393static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3394 const ARMSubtarget *ST) { 3395 EVT VT = N->getValueType(0); 3396 DebugLoc dl = N->getDebugLoc(); 3397 3398 // We can get here for a node like i32 = ISD::SHL i32, i64 3399 if (VT != MVT::i64) 3400 return SDValue(); 3401 3402 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3403 "Unknown shift to lower!"); 3404 3405 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3406 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3407 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3408 return SDValue(); 3409 3410 // If we are in thumb mode, we don't have RRX. 3411 if (ST->isThumb1Only()) return SDValue(); 3412 3413 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3414 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3415 DAG.getConstant(0, MVT::i32)); 3416 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3417 DAG.getConstant(1, MVT::i32)); 3418 3419 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3420 // captures the result into a carry flag. 3421 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3422 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3423 3424 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3425 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3426 3427 // Merge the pieces into a single i64 value. 3428 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3429} 3430 3431static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3432 SDValue TmpOp0, TmpOp1; 3433 bool Invert = false; 3434 bool Swap = false; 3435 unsigned Opc = 0; 3436 3437 SDValue Op0 = Op.getOperand(0); 3438 SDValue Op1 = Op.getOperand(1); 3439 SDValue CC = Op.getOperand(2); 3440 EVT VT = Op.getValueType(); 3441 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3442 DebugLoc dl = Op.getDebugLoc(); 3443 3444 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3445 switch (SetCCOpcode) { 3446 default: llvm_unreachable("Illegal FP comparison"); break; 3447 case ISD::SETUNE: 3448 case ISD::SETNE: Invert = true; // Fallthrough 3449 case ISD::SETOEQ: 3450 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3451 case ISD::SETOLT: 3452 case ISD::SETLT: Swap = true; // Fallthrough 3453 case ISD::SETOGT: 3454 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3455 case ISD::SETOLE: 3456 case ISD::SETLE: Swap = true; // Fallthrough 3457 case ISD::SETOGE: 3458 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3459 case ISD::SETUGE: Swap = true; // Fallthrough 3460 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3461 case ISD::SETUGT: Swap = true; // Fallthrough 3462 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3463 case ISD::SETUEQ: Invert = true; // Fallthrough 3464 case ISD::SETONE: 3465 // Expand this to (OLT | OGT). 
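      // SETONE (ordered and not equal) holds exactly when LHS < RHS or
      // LHS > RHS; a NaN operand makes both VCGT results all-zero, which is
      // the desired "false". SETUEQ falls through from above and reuses the
      // same expansion with Invert set.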
3466 TmpOp0 = Op0; 3467 TmpOp1 = Op1; 3468 Opc = ISD::OR; 3469 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3470 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3471 break; 3472 case ISD::SETUO: Invert = true; // Fallthrough 3473 case ISD::SETO: 3474 // Expand this to (OLT | OGE). 3475 TmpOp0 = Op0; 3476 TmpOp1 = Op1; 3477 Opc = ISD::OR; 3478 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3479 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3480 break; 3481 } 3482 } else { 3483 // Integer comparisons. 3484 switch (SetCCOpcode) { 3485 default: llvm_unreachable("Illegal integer comparison"); break; 3486 case ISD::SETNE: Invert = true; 3487 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3488 case ISD::SETLT: Swap = true; 3489 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3490 case ISD::SETLE: Swap = true; 3491 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3492 case ISD::SETULT: Swap = true; 3493 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3494 case ISD::SETULE: Swap = true; 3495 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3496 } 3497 3498 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3499 if (Opc == ARMISD::VCEQ) { 3500 3501 SDValue AndOp; 3502 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3503 AndOp = Op0; 3504 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3505 AndOp = Op1; 3506 3507 // Ignore bitconvert. 3508 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3509 AndOp = AndOp.getOperand(0); 3510 3511 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3512 Opc = ARMISD::VTST; 3513 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3514 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3515 Invert = !Invert; 3516 } 3517 } 3518 } 3519 3520 if (Swap) 3521 std::swap(Op0, Op1); 3522 3523 // If one of the operands is a constant vector zero, attempt to fold the 3524 // comparison to a specialized compare-against-zero form. 3525 SDValue SingleOp; 3526 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3527 SingleOp = Op0; 3528 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3529 if (Opc == ARMISD::VCGE) 3530 Opc = ARMISD::VCLEZ; 3531 else if (Opc == ARMISD::VCGT) 3532 Opc = ARMISD::VCLTZ; 3533 SingleOp = Op1; 3534 } 3535 3536 SDValue Result; 3537 if (SingleOp.getNode()) { 3538 switch (Opc) { 3539 case ARMISD::VCEQ: 3540 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3541 case ARMISD::VCGE: 3542 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3543 case ARMISD::VCLEZ: 3544 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3545 case ARMISD::VCGT: 3546 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3547 case ARMISD::VCLTZ: 3548 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3549 default: 3550 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3551 } 3552 } else { 3553 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3554 } 3555 3556 if (Invert) 3557 Result = DAG.getNOT(dl, Result, VT); 3558 3559 return Result; 3560} 3561 3562/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3563/// valid vector constant for a NEON instruction with a "modified immediate" 3564/// operand (e.g., VMOV). If so, return the encoded value. 
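/// On success the result is a target constant holding the Op/Cmode bits and
/// the 8-bit immediate packed by ARM_AM::createNEONModImm, and VT is set to
/// the NEON vector type implied by the splat element size (and by is128Bits).
/// An empty SDValue means the splat cannot be encoded as a modified immediate.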
3565static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3566 unsigned SplatBitSize, SelectionDAG &DAG, 3567 EVT &VT, bool is128Bits, NEONModImmType type) { 3568 unsigned OpCmode, Imm; 3569 3570 // SplatBitSize is set to the smallest size that splats the vector, so a 3571 // zero vector will always have SplatBitSize == 8. However, NEON modified 3572 // immediate instructions others than VMOV do not support the 8-bit encoding 3573 // of a zero vector, and the default encoding of zero is supposed to be the 3574 // 32-bit version. 3575 if (SplatBits == 0) 3576 SplatBitSize = 32; 3577 3578 switch (SplatBitSize) { 3579 case 8: 3580 if (type != VMOVModImm) 3581 return SDValue(); 3582 // Any 1-byte value is OK. Op=0, Cmode=1110. 3583 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3584 OpCmode = 0xe; 3585 Imm = SplatBits; 3586 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3587 break; 3588 3589 case 16: 3590 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3591 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3592 if ((SplatBits & ~0xff) == 0) { 3593 // Value = 0x00nn: Op=x, Cmode=100x. 3594 OpCmode = 0x8; 3595 Imm = SplatBits; 3596 break; 3597 } 3598 if ((SplatBits & ~0xff00) == 0) { 3599 // Value = 0xnn00: Op=x, Cmode=101x. 3600 OpCmode = 0xa; 3601 Imm = SplatBits >> 8; 3602 break; 3603 } 3604 return SDValue(); 3605 3606 case 32: 3607 // NEON's 32-bit VMOV supports splat values where: 3608 // * only one byte is nonzero, or 3609 // * the least significant byte is 0xff and the second byte is nonzero, or 3610 // * the least significant 2 bytes are 0xff and the third is nonzero. 3611 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3612 if ((SplatBits & ~0xff) == 0) { 3613 // Value = 0x000000nn: Op=x, Cmode=000x. 3614 OpCmode = 0; 3615 Imm = SplatBits; 3616 break; 3617 } 3618 if ((SplatBits & ~0xff00) == 0) { 3619 // Value = 0x0000nn00: Op=x, Cmode=001x. 3620 OpCmode = 0x2; 3621 Imm = SplatBits >> 8; 3622 break; 3623 } 3624 if ((SplatBits & ~0xff0000) == 0) { 3625 // Value = 0x00nn0000: Op=x, Cmode=010x. 3626 OpCmode = 0x4; 3627 Imm = SplatBits >> 16; 3628 break; 3629 } 3630 if ((SplatBits & ~0xff000000) == 0) { 3631 // Value = 0xnn000000: Op=x, Cmode=011x. 3632 OpCmode = 0x6; 3633 Imm = SplatBits >> 24; 3634 break; 3635 } 3636 3637 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3638 if (type == OtherModImm) return SDValue(); 3639 3640 if ((SplatBits & ~0xffff) == 0 && 3641 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3642 // Value = 0x0000nnff: Op=x, Cmode=1100. 3643 OpCmode = 0xc; 3644 Imm = SplatBits >> 8; 3645 SplatBits |= 0xff; 3646 break; 3647 } 3648 3649 if ((SplatBits & ~0xffffff) == 0 && 3650 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3651 // Value = 0x00nnffff: Op=x, Cmode=1101. 3652 OpCmode = 0xd; 3653 Imm = SplatBits >> 16; 3654 SplatBits |= 0xffff; 3655 break; 3656 } 3657 3658 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3659 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3660 // VMOV.I32. A (very) minor optimization would be to replicate the value 3661 // and fall through here to test for a valid 64-bit splat. But, then the 3662 // caller would also need to check and handle the change in size. 3663 return SDValue(); 3664 3665 case 64: { 3666 if (type != VMOVModImm) 3667 return SDValue(); 3668 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
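    // Each of the 8 immediate bits selects one byte of the 64-bit pattern
    // (bit i set => byte i is 0xff, counting from the least significant byte).
    // For example, a splat of 0x00ff00ff00ff00ff encodes as Imm = 0x55; any
    // byte that is neither 0x00 nor 0xff makes the value unencodable.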
3669 uint64_t BitMask = 0xff; 3670 uint64_t Val = 0; 3671 unsigned ImmMask = 1; 3672 Imm = 0; 3673 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3674 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3675 Val |= BitMask; 3676 Imm |= ImmMask; 3677 } else if ((SplatBits & BitMask) != 0) { 3678 return SDValue(); 3679 } 3680 BitMask <<= 8; 3681 ImmMask <<= 1; 3682 } 3683 // Op=1, Cmode=1110. 3684 OpCmode = 0x1e; 3685 SplatBits = Val; 3686 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3687 break; 3688 } 3689 3690 default: 3691 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3692 return SDValue(); 3693 } 3694 3695 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3696 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3697} 3698 3699static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3700 bool &ReverseVEXT, unsigned &Imm) { 3701 unsigned NumElts = VT.getVectorNumElements(); 3702 ReverseVEXT = false; 3703 3704 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3705 if (M[0] < 0) 3706 return false; 3707 3708 Imm = M[0]; 3709 3710 // If this is a VEXT shuffle, the immediate value is the index of the first 3711 // element. The other shuffle indices must be the successive elements after 3712 // the first one. 3713 unsigned ExpectedElt = Imm; 3714 for (unsigned i = 1; i < NumElts; ++i) { 3715 // Increment the expected index. If it wraps around, it may still be 3716 // a VEXT but the source vectors must be swapped. 3717 ExpectedElt += 1; 3718 if (ExpectedElt == NumElts * 2) { 3719 ExpectedElt = 0; 3720 ReverseVEXT = true; 3721 } 3722 3723 if (M[i] < 0) continue; // ignore UNDEF indices 3724 if (ExpectedElt != static_cast<unsigned>(M[i])) 3725 return false; 3726 } 3727 3728 // Adjust the index value if the source operands will be swapped. 3729 if (ReverseVEXT) 3730 Imm -= NumElts; 3731 3732 return true; 3733} 3734 3735/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3736/// instruction with the specified blocksize. (The order of the elements 3737/// within each block of the vector is reversed.) 3738static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3739 unsigned BlockSize) { 3740 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3741 "Only possible block sizes for VREV are: 16, 32, 64"); 3742 3743 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3744 if (EltSz == 64) 3745 return false; 3746 3747 unsigned NumElts = VT.getVectorNumElements(); 3748 unsigned BlockElts = M[0] + 1; 3749 // If the first shuffle index is UNDEF, be optimistic. 3750 if (M[0] < 0) 3751 BlockElts = BlockSize / EltSz; 3752 3753 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3754 return false; 3755 3756 for (unsigned i = 0; i < NumElts; ++i) { 3757 if (M[i] < 0) continue; // ignore UNDEF indices 3758 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3759 return false; 3760 } 3761 3762 return true; 3763} 3764 3765static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3766 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3767 // range, then 0 is placed into the resulting vector. So pretty much any mask 3768 // of 8 elements can work here. 
3769 return VT == MVT::v8i8 && M.size() == 8; 3770} 3771 3772static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3773 unsigned &WhichResult) { 3774 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3775 if (EltSz == 64) 3776 return false; 3777 3778 unsigned NumElts = VT.getVectorNumElements(); 3779 WhichResult = (M[0] == 0 ? 0 : 1); 3780 for (unsigned i = 0; i < NumElts; i += 2) { 3781 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3782 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3783 return false; 3784 } 3785 return true; 3786} 3787 3788/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3789/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3790/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3791static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3792 unsigned &WhichResult) { 3793 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3794 if (EltSz == 64) 3795 return false; 3796 3797 unsigned NumElts = VT.getVectorNumElements(); 3798 WhichResult = (M[0] == 0 ? 0 : 1); 3799 for (unsigned i = 0; i < NumElts; i += 2) { 3800 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3801 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3802 return false; 3803 } 3804 return true; 3805} 3806 3807static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3808 unsigned &WhichResult) { 3809 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3810 if (EltSz == 64) 3811 return false; 3812 3813 unsigned NumElts = VT.getVectorNumElements(); 3814 WhichResult = (M[0] == 0 ? 0 : 1); 3815 for (unsigned i = 0; i != NumElts; ++i) { 3816 if (M[i] < 0) continue; // ignore UNDEF indices 3817 if ((unsigned) M[i] != 2 * i + WhichResult) 3818 return false; 3819 } 3820 3821 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3822 if (VT.is64BitVector() && EltSz == 32) 3823 return false; 3824 3825 return true; 3826} 3827 3828/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3829/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3830/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3831static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3832 unsigned &WhichResult) { 3833 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3834 if (EltSz == 64) 3835 return false; 3836 3837 unsigned Half = VT.getVectorNumElements() / 2; 3838 WhichResult = (M[0] == 0 ? 0 : 1); 3839 for (unsigned j = 0; j != 2; ++j) { 3840 unsigned Idx = WhichResult; 3841 for (unsigned i = 0; i != Half; ++i) { 3842 int MIdx = M[i + j * Half]; 3843 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3844 return false; 3845 Idx += 2; 3846 } 3847 } 3848 3849 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3850 if (VT.is64BitVector() && EltSz == 32) 3851 return false; 3852 3853 return true; 3854} 3855 3856static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3857 unsigned &WhichResult) { 3858 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3859 if (EltSz == 64) 3860 return false; 3861 3862 unsigned NumElts = VT.getVectorNumElements(); 3863 WhichResult = (M[0] == 0 ? 0 : 1); 3864 unsigned Idx = WhichResult * NumElts / 2; 3865 for (unsigned i = 0; i != NumElts; i += 2) { 3866 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3867 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3868 return false; 3869 Idx += 1; 3870 } 3871 3872 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
3873 if (VT.is64BitVector() && EltSz == 32) 3874 return false; 3875 3876 return true; 3877} 3878 3879/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3880/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3881/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3882static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3883 unsigned &WhichResult) { 3884 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3885 if (EltSz == 64) 3886 return false; 3887 3888 unsigned NumElts = VT.getVectorNumElements(); 3889 WhichResult = (M[0] == 0 ? 0 : 1); 3890 unsigned Idx = WhichResult * NumElts / 2; 3891 for (unsigned i = 0; i != NumElts; i += 2) { 3892 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3893 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3894 return false; 3895 Idx += 1; 3896 } 3897 3898 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3899 if (VT.is64BitVector() && EltSz == 32) 3900 return false; 3901 3902 return true; 3903} 3904 3905// If N is an integer constant that can be moved into a register in one 3906// instruction, return an SDValue of such a constant (will become a MOV 3907// instruction). Otherwise return null. 3908static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3909 const ARMSubtarget *ST, DebugLoc dl) { 3910 uint64_t Val; 3911 if (!isa<ConstantSDNode>(N)) 3912 return SDValue(); 3913 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3914 3915 if (ST->isThumb1Only()) { 3916 if (Val <= 255 || ~Val <= 255) 3917 return DAG.getConstant(Val, MVT::i32); 3918 } else { 3919 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3920 return DAG.getConstant(Val, MVT::i32); 3921 } 3922 return SDValue(); 3923} 3924 3925// If this is a case we can't handle, return null and let the default 3926// expansion code take care of it. 3927SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3928 const ARMSubtarget *ST) const { 3929 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3930 DebugLoc dl = Op.getDebugLoc(); 3931 EVT VT = Op.getValueType(); 3932 3933 APInt SplatBits, SplatUndef; 3934 unsigned SplatBitSize; 3935 bool HasAnyUndefs; 3936 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3937 if (SplatBitSize <= 64) { 3938 // Check if an immediate VMOV works. 3939 EVT VmovVT; 3940 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3941 SplatUndef.getZExtValue(), SplatBitSize, 3942 DAG, VmovVT, VT.is128BitVector(), 3943 VMOVModImm); 3944 if (Val.getNode()) { 3945 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3946 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3947 } 3948 3949 // Try an immediate VMVN. 3950 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3951 Val = isNEONModifiedImm(NegatedImm, 3952 SplatUndef.getZExtValue(), SplatBitSize, 3953 DAG, VmovVT, VT.is128BitVector(), 3954 VMVNModImm); 3955 if (Val.getNode()) { 3956 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3957 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3958 } 3959 } 3960 } 3961 3962 // Scan through the operands to see if only one value is used. 
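  // The scan below drives the strategy selection that follows: a single
  // defined element becomes SCALAR_TO_VECTOR, a splat of one value becomes a
  // VDUP (from a register, or from a constant a single MOV can materialize),
  // an all-constant vector otherwise falls back to the default constant-pool
  // expansion, and the remaining cases are attempted as a shuffle
  // (ReconstructShuffle) or as an ARMISD::BUILD_VECTOR of the elements.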
3963 unsigned NumElts = VT.getVectorNumElements(); 3964 bool isOnlyLowElement = true; 3965 bool usesOnlyOneValue = true; 3966 bool isConstant = true; 3967 SDValue Value; 3968 for (unsigned i = 0; i < NumElts; ++i) { 3969 SDValue V = Op.getOperand(i); 3970 if (V.getOpcode() == ISD::UNDEF) 3971 continue; 3972 if (i > 0) 3973 isOnlyLowElement = false; 3974 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3975 isConstant = false; 3976 3977 if (!Value.getNode()) 3978 Value = V; 3979 else if (V != Value) 3980 usesOnlyOneValue = false; 3981 } 3982 3983 if (!Value.getNode()) 3984 return DAG.getUNDEF(VT); 3985 3986 if (isOnlyLowElement) 3987 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3988 3989 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3990 3991 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3992 // i32 and try again. 3993 if (usesOnlyOneValue && EltSize <= 32) { 3994 if (!isConstant) 3995 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3996 if (VT.getVectorElementType().isFloatingPoint()) { 3997 SmallVector<SDValue, 8> Ops; 3998 for (unsigned i = 0; i < NumElts; ++i) 3999 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4000 Op.getOperand(i))); 4001 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4002 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4003 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4004 if (Val.getNode()) 4005 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4006 } 4007 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4008 if (Val.getNode()) 4009 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4010 } 4011 4012 // If all elements are constants and the case above didn't get hit, fall back 4013 // to the default expansion, which will generate a load from the constant 4014 // pool. 4015 if (isConstant) 4016 return SDValue(); 4017 4018 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4019 if (NumElts >= 4) { 4020 SDValue shuffle = ReconstructShuffle(Op, DAG); 4021 if (shuffle != SDValue()) 4022 return shuffle; 4023 } 4024 4025 // Vectors with 32- or 64-bit elements can be built by directly assigning 4026 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4027 // will be legalized. 4028 if (EltSize >= 32) { 4029 // Do the expansion with floating-point types, since that is what the VFP 4030 // registers are defined to use, and since i64 is not legal. 4031 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4032 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4033 SmallVector<SDValue, 8> Ops; 4034 for (unsigned i = 0; i < NumElts; ++i) 4035 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4036 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4037 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4038 } 4039 4040 return SDValue(); 4041} 4042 4043// Gather data to see if the operation can be modelled as a 4044// shuffle in combination with VEXTs. 
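// Every defined operand must be an EXTRACT_VECTOR_ELT with a matching element
// type; the vectors being extracted from are collected (at most two sources
// are supported), each source that is wider than the result is narrowed with
// EXTRACT_SUBVECTOR or aligned with a VEXT, and the BUILD_VECTOR is then
// re-expressed as a single VECTOR_SHUFFLE if isShuffleMaskLegal accepts the
// resulting mask.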
4045SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4046 SelectionDAG &DAG) const { 4047 DebugLoc dl = Op.getDebugLoc(); 4048 EVT VT = Op.getValueType(); 4049 unsigned NumElts = VT.getVectorNumElements(); 4050 4051 SmallVector<SDValue, 2> SourceVecs; 4052 SmallVector<unsigned, 2> MinElts; 4053 SmallVector<unsigned, 2> MaxElts; 4054 4055 for (unsigned i = 0; i < NumElts; ++i) { 4056 SDValue V = Op.getOperand(i); 4057 if (V.getOpcode() == ISD::UNDEF) 4058 continue; 4059 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4060 // A shuffle can only come from building a vector from various 4061 // elements of other vectors. 4062 return SDValue(); 4063 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4064 VT.getVectorElementType()) { 4065 // This code doesn't know how to handle shuffles where the vector 4066 // element types do not match (this happens because type legalization 4067 // promotes the return type of EXTRACT_VECTOR_ELT). 4068 // FIXME: It might be appropriate to extend this code to handle 4069 // mismatched types. 4070 return SDValue(); 4071 } 4072 4073 // Record this extraction against the appropriate vector if possible... 4074 SDValue SourceVec = V.getOperand(0); 4075 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4076 bool FoundSource = false; 4077 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4078 if (SourceVecs[j] == SourceVec) { 4079 if (MinElts[j] > EltNo) 4080 MinElts[j] = EltNo; 4081 if (MaxElts[j] < EltNo) 4082 MaxElts[j] = EltNo; 4083 FoundSource = true; 4084 break; 4085 } 4086 } 4087 4088 // Or record a new source if not... 4089 if (!FoundSource) { 4090 SourceVecs.push_back(SourceVec); 4091 MinElts.push_back(EltNo); 4092 MaxElts.push_back(EltNo); 4093 } 4094 } 4095 4096 // Currently only do something sane when at most two source vectors 4097 // involved. 4098 if (SourceVecs.size() > 2) 4099 return SDValue(); 4100 4101 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4102 int VEXTOffsets[2] = {0, 0}; 4103 4104 // This loop extracts the usage patterns of the source vectors 4105 // and prepares appropriate SDValues for a shuffle if possible. 4106 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4107 if (SourceVecs[i].getValueType() == VT) { 4108 // No VEXT necessary 4109 ShuffleSrcs[i] = SourceVecs[i]; 4110 VEXTOffsets[i] = 0; 4111 continue; 4112 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4113 // It probably isn't worth padding out a smaller vector just to 4114 // break it down again in a shuffle. 4115 return SDValue(); 4116 } 4117 4118 // Since only 64-bit and 128-bit vectors are legal on ARM and 4119 // we've eliminated the other cases... 
4120 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4121 "unexpected vector sizes in ReconstructShuffle"); 4122 4123 if (MaxElts[i] - MinElts[i] >= NumElts) { 4124 // Span too large for a VEXT to cope 4125 return SDValue(); 4126 } 4127 4128 if (MinElts[i] >= NumElts) { 4129 // The extraction can just take the second half 4130 VEXTOffsets[i] = NumElts; 4131 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4132 SourceVecs[i], 4133 DAG.getIntPtrConstant(NumElts)); 4134 } else if (MaxElts[i] < NumElts) { 4135 // The extraction can just take the first half 4136 VEXTOffsets[i] = 0; 4137 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4138 SourceVecs[i], 4139 DAG.getIntPtrConstant(0)); 4140 } else { 4141 // An actual VEXT is needed 4142 VEXTOffsets[i] = MinElts[i]; 4143 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4144 SourceVecs[i], 4145 DAG.getIntPtrConstant(0)); 4146 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4147 SourceVecs[i], 4148 DAG.getIntPtrConstant(NumElts)); 4149 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4150 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4151 } 4152 } 4153 4154 SmallVector<int, 8> Mask; 4155 4156 for (unsigned i = 0; i < NumElts; ++i) { 4157 SDValue Entry = Op.getOperand(i); 4158 if (Entry.getOpcode() == ISD::UNDEF) { 4159 Mask.push_back(-1); 4160 continue; 4161 } 4162 4163 SDValue ExtractVec = Entry.getOperand(0); 4164 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4165 .getOperand(1))->getSExtValue(); 4166 if (ExtractVec == SourceVecs[0]) { 4167 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4168 } else { 4169 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4170 } 4171 } 4172 4173 // Final check before we try to produce nonsense... 4174 if (isShuffleMaskLegal(Mask, VT)) 4175 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4176 &Mask[0]); 4177 4178 return SDValue(); 4179} 4180 4181/// isShuffleMaskLegal - Targets can use this to indicate that they only 4182/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4183/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4184/// are assumed to be legal. 4185bool 4186ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4187 EVT VT) const { 4188 if (VT.getVectorNumElements() == 4 && 4189 (VT.is128BitVector() || VT.is64BitVector())) { 4190 unsigned PFIndexes[4]; 4191 for (unsigned i = 0; i != 4; ++i) { 4192 if (M[i] < 0) 4193 PFIndexes[i] = 8; 4194 else 4195 PFIndexes[i] = M[i]; 4196 } 4197 4198 // Compute the index in the perfect shuffle table. 
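    // Each of the four mask entries is a digit in [0, 8] (8 meaning undef), so
    // together they form a base-9 index into PerfectShuffleTable; the top two
    // bits of a table entry give its cost, and masks costing at most 4 are
    // considered legal here.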
4199 unsigned PFTableIndex = 4200 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4201 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4202 unsigned Cost = (PFEntry >> 30); 4203 4204 if (Cost <= 4) 4205 return true; 4206 } 4207 4208 bool ReverseVEXT; 4209 unsigned Imm, WhichResult; 4210 4211 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4212 return (EltSize >= 32 || 4213 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4214 isVREVMask(M, VT, 64) || 4215 isVREVMask(M, VT, 32) || 4216 isVREVMask(M, VT, 16) || 4217 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4218 isVTBLMask(M, VT) || 4219 isVTRNMask(M, VT, WhichResult) || 4220 isVUZPMask(M, VT, WhichResult) || 4221 isVZIPMask(M, VT, WhichResult) || 4222 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4223 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4224 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4225} 4226 4227/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4228/// the specified operations to build the shuffle. 4229static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4230 SDValue RHS, SelectionDAG &DAG, 4231 DebugLoc dl) { 4232 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4233 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4234 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4235 4236 enum { 4237 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4238 OP_VREV, 4239 OP_VDUP0, 4240 OP_VDUP1, 4241 OP_VDUP2, 4242 OP_VDUP3, 4243 OP_VEXT1, 4244 OP_VEXT2, 4245 OP_VEXT3, 4246 OP_VUZPL, // VUZP, left result 4247 OP_VUZPR, // VUZP, right result 4248 OP_VZIPL, // VZIP, left result 4249 OP_VZIPR, // VZIP, right result 4250 OP_VTRNL, // VTRN, left result 4251 OP_VTRNR // VTRN, right result 4252 }; 4253 4254 if (OpNum == OP_COPY) { 4255 if (LHSID == (1*9+2)*9+3) return LHS; 4256 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4257 return RHS; 4258 } 4259 4260 SDValue OpLHS, OpRHS; 4261 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4262 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4263 EVT VT = OpLHS.getValueType(); 4264 4265 switch (OpNum) { 4266 default: llvm_unreachable("Unknown shuffle opcode!"); 4267 case OP_VREV: 4268 // VREV divides the vector in half and swaps within the half. 
4269 if (VT.getVectorElementType() == MVT::i32 || 4270 VT.getVectorElementType() == MVT::f32) 4271 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4272 // vrev <4 x i16> -> VREV32 4273 if (VT.getVectorElementType() == MVT::i16) 4274 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4275 // vrev <4 x i8> -> VREV16 4276 assert(VT.getVectorElementType() == MVT::i8); 4277 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4278 case OP_VDUP0: 4279 case OP_VDUP1: 4280 case OP_VDUP2: 4281 case OP_VDUP3: 4282 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4283 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4284 case OP_VEXT1: 4285 case OP_VEXT2: 4286 case OP_VEXT3: 4287 return DAG.getNode(ARMISD::VEXT, dl, VT, 4288 OpLHS, OpRHS, 4289 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4290 case OP_VUZPL: 4291 case OP_VUZPR: 4292 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4293 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4294 case OP_VZIPL: 4295 case OP_VZIPR: 4296 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4297 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4298 case OP_VTRNL: 4299 case OP_VTRNR: 4300 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4301 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4302 } 4303} 4304 4305static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4306 SmallVectorImpl<int> &ShuffleMask, 4307 SelectionDAG &DAG) { 4308 // Check to see if we can use the VTBL instruction. 4309 SDValue V1 = Op.getOperand(0); 4310 SDValue V2 = Op.getOperand(1); 4311 DebugLoc DL = Op.getDebugLoc(); 4312 4313 SmallVector<SDValue, 8> VTBLMask; 4314 for (SmallVectorImpl<int>::iterator 4315 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4316 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4317 4318 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4319 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4320 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4321 &VTBLMask[0], 8)); 4322 4323 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4324 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4325 &VTBLMask[0], 8)); 4326} 4327 4328static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4329 SDValue V1 = Op.getOperand(0); 4330 SDValue V2 = Op.getOperand(1); 4331 DebugLoc dl = Op.getDebugLoc(); 4332 EVT VT = Op.getValueType(); 4333 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4334 SmallVector<int, 8> ShuffleMask; 4335 4336 // Convert shuffles that are directly supported on NEON to target-specific 4337 // DAG nodes, instead of keeping them as shuffles and matching them again 4338 // during code selection. This is more efficient and avoids the possibility 4339 // of inconsistencies between legalization and selection. 4340 // FIXME: floating-point vectors should be canonicalized to integer vectors 4341 // of the same time so that they get CSEd properly. 4342 SVN->getMask(ShuffleMask); 4343 4344 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4345 if (EltSize <= 32) { 4346 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4347 int Lane = SVN->getSplatIndex(); 4348 // If this is undef splat, generate it via "just" vdup, if possible. 4349 if (Lane == -1) Lane = 0; 4350 4351 // Test if V1 is a SCALAR_TO_VECTOR. 
4352 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4353 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4354 } 4355 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 4356 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 4357 // reaches it). 4358 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 4359 !isa<ConstantSDNode>(V1.getOperand(0))) { 4360 bool IsScalarToVector = true; 4361 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 4362 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 4363 IsScalarToVector = false; 4364 break; 4365 } 4366 if (IsScalarToVector) 4367 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4368 } 4369 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4370 DAG.getConstant(Lane, MVT::i32)); 4371 } 4372 4373 bool ReverseVEXT; 4374 unsigned Imm; 4375 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4376 if (ReverseVEXT) 4377 std::swap(V1, V2); 4378 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4379 DAG.getConstant(Imm, MVT::i32)); 4380 } 4381 4382 if (isVREVMask(ShuffleMask, VT, 64)) 4383 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4384 if (isVREVMask(ShuffleMask, VT, 32)) 4385 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4386 if (isVREVMask(ShuffleMask, VT, 16)) 4387 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4388 4389 // Check for Neon shuffles that modify both input vectors in place. 4390 // If both results are used, i.e., if there are two shuffles with the same 4391 // source operands and with masks corresponding to both results of one of 4392 // these operations, DAG memoization will ensure that a single node is 4393 // used for both shuffles. 4394 unsigned WhichResult; 4395 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4396 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4397 V1, V2).getValue(WhichResult); 4398 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4399 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4400 V1, V2).getValue(WhichResult); 4401 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4402 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4403 V1, V2).getValue(WhichResult); 4404 4405 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4406 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4407 V1, V1).getValue(WhichResult); 4408 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4409 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4410 V1, V1).getValue(WhichResult); 4411 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4412 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4413 V1, V1).getValue(WhichResult); 4414 } 4415 4416 // If the shuffle is not directly supported and it has 4 elements, use 4417 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4418 unsigned NumElts = VT.getVectorNumElements(); 4419 if (NumElts == 4) { 4420 unsigned PFIndexes[4]; 4421 for (unsigned i = 0; i != 4; ++i) { 4422 if (ShuffleMask[i] < 0) 4423 PFIndexes[i] = 8; 4424 else 4425 PFIndexes[i] = ShuffleMask[i]; 4426 } 4427 4428 // Compute the index in the perfect shuffle table. 4429 unsigned PFTableIndex = 4430 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4431 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4432 unsigned Cost = (PFEntry >> 30); 4433 4434 if (Cost <= 4) 4435 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4436 } 4437 4438 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 
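  // Since EltSize >= 32, even a 128-bit vector has at most four elements, so
  // the element-by-element expansion below stays small.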
4439 if (EltSize >= 32) { 4440 // Do the expansion with floating-point types, since that is what the VFP 4441 // registers are defined to use, and since i64 is not legal. 4442 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4443 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4444 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4445 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4446 SmallVector<SDValue, 8> Ops; 4447 for (unsigned i = 0; i < NumElts; ++i) { 4448 if (ShuffleMask[i] < 0) 4449 Ops.push_back(DAG.getUNDEF(EltVT)); 4450 else 4451 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4452 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4453 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4454 MVT::i32))); 4455 } 4456 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4457 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4458 } 4459 4460 if (VT == MVT::v8i8) { 4461 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4462 if (NewOp.getNode()) 4463 return NewOp; 4464 } 4465 4466 return SDValue(); 4467} 4468 4469static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4470 // INSERT_VECTOR_ELT is legal only for immediate indexes. 4471 SDValue Lane = Op.getOperand(2); 4472 if (!isa<ConstantSDNode>(Lane)) 4473 return SDValue(); 4474 4475 return Op; 4476} 4477 4478static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4479 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4480 SDValue Lane = Op.getOperand(1); 4481 if (!isa<ConstantSDNode>(Lane)) 4482 return SDValue(); 4483 4484 SDValue Vec = Op.getOperand(0); 4485 if (Op.getValueType() == MVT::i32 && 4486 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4487 DebugLoc dl = Op.getDebugLoc(); 4488 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4489 } 4490 4491 return Op; 4492} 4493 4494static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4495 // The only time a CONCAT_VECTORS operation can have legal types is when 4496 // two 64-bit vectors are concatenated to a 128-bit vector. 4497 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4498 "unexpected CONCAT_VECTORS"); 4499 DebugLoc dl = Op.getDebugLoc(); 4500 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4501 SDValue Op0 = Op.getOperand(0); 4502 SDValue Op1 = Op.getOperand(1); 4503 if (Op0.getOpcode() != ISD::UNDEF) 4504 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4505 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4506 DAG.getIntPtrConstant(0)); 4507 if (Op1.getOpcode() != ISD::UNDEF) 4508 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4509 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4510 DAG.getIntPtrConstant(1)); 4511 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4512} 4513 4514/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4515/// element has been zero/sign-extended, depending on the isSigned parameter, 4516/// from an integer type half its size. 4517static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4518 bool isSigned) { 4519 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4520 EVT VT = N->getValueType(0); 4521 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4522 SDNode *BVN = N->getOperand(0).getNode(); 4523 if (BVN->getValueType(0) != MVT::v4i32 || 4524 BVN->getOpcode() != ISD::BUILD_VECTOR) 4525 return false; 4526 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4527 unsigned HiElt = 1 - LoElt; 4528 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4529 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4530 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4531 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4532 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4533 return false; 4534 if (isSigned) { 4535 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4536 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4537 return true; 4538 } else { 4539 if (Hi0->isNullValue() && Hi1->isNullValue()) 4540 return true; 4541 } 4542 return false; 4543 } 4544 4545 if (N->getOpcode() != ISD::BUILD_VECTOR) 4546 return false; 4547 4548 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4549 SDNode *Elt = N->getOperand(i).getNode(); 4550 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4551 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4552 unsigned HalfSize = EltSize / 2; 4553 if (isSigned) { 4554 if (!isIntN(HalfSize, C->getSExtValue())) 4555 return false; 4556 } else { 4557 if (!isUIntN(HalfSize, C->getZExtValue())) 4558 return false; 4559 } 4560 continue; 4561 } 4562 return false; 4563 } 4564 4565 return true; 4566} 4567 4568/// isSignExtended - Check if a node is a vector value that is sign-extended 4569/// or a constant BUILD_VECTOR with sign-extended elements. 4570static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4571 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4572 return true; 4573 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4574 return true; 4575 return false; 4576} 4577 4578/// isZeroExtended - Check if a node is a vector value that is zero-extended 4579/// or a constant BUILD_VECTOR with zero-extended elements. 4580static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4581 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4582 return true; 4583 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4584 return true; 4585 return false; 4586} 4587 4588/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4589/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4590static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4591 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4592 return N->getOperand(0); 4593 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4594 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4595 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4596 LD->isNonTemporal(), LD->getAlignment()); 4597 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4598 // have been legalized as a BITCAST from v4i32. 4599 if (N->getOpcode() == ISD::BITCAST) { 4600 SDNode *BVN = N->getOperand(0).getNode(); 4601 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4602 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4603 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4604 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4605 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4606 } 4607 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
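  // The truncation below is lossless: isExtendedBUILD_VECTOR has already
  // verified that every constant element fits in the half-width type.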
4608 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4609 EVT VT = N->getValueType(0); 4610 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4611 unsigned NumElts = VT.getVectorNumElements(); 4612 MVT TruncVT = MVT::getIntegerVT(EltSize); 4613 SmallVector<SDValue, 8> Ops; 4614 for (unsigned i = 0; i != NumElts; ++i) { 4615 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4616 const APInt &CInt = C->getAPIntValue(); 4617 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4618 } 4619 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4620 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4621} 4622 4623static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4624 unsigned Opcode = N->getOpcode(); 4625 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4626 SDNode *N0 = N->getOperand(0).getNode(); 4627 SDNode *N1 = N->getOperand(1).getNode(); 4628 return N0->hasOneUse() && N1->hasOneUse() && 4629 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4630 } 4631 return false; 4632} 4633 4634static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4635 unsigned Opcode = N->getOpcode(); 4636 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4637 SDNode *N0 = N->getOperand(0).getNode(); 4638 SDNode *N1 = N->getOperand(1).getNode(); 4639 return N0->hasOneUse() && N1->hasOneUse() && 4640 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4641 } 4642 return false; 4643} 4644 4645static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4646 // Multiplications are only custom-lowered for 128-bit vectors so that 4647 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4648 EVT VT = Op.getValueType(); 4649 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4650 SDNode *N0 = Op.getOperand(0).getNode(); 4651 SDNode *N1 = Op.getOperand(1).getNode(); 4652 unsigned NewOpc = 0; 4653 bool isMLA = false; 4654 bool isN0SExt = isSignExtended(N0, DAG); 4655 bool isN1SExt = isSignExtended(N1, DAG); 4656 if (isN0SExt && isN1SExt) 4657 NewOpc = ARMISD::VMULLs; 4658 else { 4659 bool isN0ZExt = isZeroExtended(N0, DAG); 4660 bool isN1ZExt = isZeroExtended(N1, DAG); 4661 if (isN0ZExt && isN1ZExt) 4662 NewOpc = ARMISD::VMULLu; 4663 else if (isN1SExt || isN1ZExt) { 4664 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4665 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4666 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4667 NewOpc = ARMISD::VMULLs; 4668 isMLA = true; 4669 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4670 NewOpc = ARMISD::VMULLu; 4671 isMLA = true; 4672 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4673 std::swap(N0, N1); 4674 NewOpc = ARMISD::VMULLu; 4675 isMLA = true; 4676 } 4677 } 4678 4679 if (!NewOpc) { 4680 if (VT == MVT::v2i64) 4681 // Fall through to expand this. It is not legal. 4682 return SDValue(); 4683 else 4684 // Other vector multiplications are legal. 4685 return Op; 4686 } 4687 } 4688 4689 // Legalize to a VMULL instruction. 4690 DebugLoc DL = Op.getDebugLoc(); 4691 SDValue Op0; 4692 SDValue Op1 = SkipExtension(N1, DAG); 4693 if (!isMLA) { 4694 Op0 = SkipExtension(N0, DAG); 4695 assert(Op0.getValueType().is64BitVector() && 4696 Op1.getValueType().is64BitVector() && 4697 "unexpected types for extended operands to VMULL"); 4698 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4699 } 4700 4701 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4702 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4703 // vmull q0, d4, d6 4704 // vmlal q0, d5, d6 4705 // is faster than 4706 // vaddl q0, d4, d5 4707 // vmovl q1, d6 4708 // vmul q0, q0, q1 4709 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4710 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4711 EVT Op1VT = Op1.getValueType(); 4712 return DAG.getNode(N0->getOpcode(), DL, VT, 4713 DAG.getNode(NewOpc, DL, VT, 4714 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4715 DAG.getNode(NewOpc, DL, VT, 4716 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4717} 4718 4719static SDValue 4720LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4721 // Convert to float 4722 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4723 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4724 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4725 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4726 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4727 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4728 // Get reciprocal estimate. 4729 // float4 recip = vrecpeq_f32(yf); 4730 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4731 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4732 // Because char has a smaller range than uchar, we can actually get away 4733 // without any newton steps. This requires that we use a weird bias 4734 // of 0xb000, however (again, this has been exhaustively tested). 4735 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4736 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4737 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4738 Y = DAG.getConstant(0xb000, MVT::i32); 4739 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4740 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4741 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4742 // Convert back to short. 4743 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4744 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4745 return X; 4746} 4747 4748static SDValue 4749LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4750 SDValue N2; 4751 // Convert to float. 4752 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4753 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4754 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4755 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4756 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4757 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4758 4759 // Use reciprocal estimate and one refinement step. 4760 // float4 recip = vrecpeq_f32(yf); 4761 // recip *= vrecpsq_f32(yf, recip); 4762 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4763 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4764 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4765 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4766 N1, N2); 4767 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4768 // Because short has a smaller range than ushort, we can actually get away 4769 // with only a single newton step. This requires that we use a weird bias 4770 // of 89, however (again, this has been exhaustively tested). 
4771 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4772 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4773 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4774 N1 = DAG.getConstant(0x89, MVT::i32); 4775 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4776 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4777 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4778 // Convert back to integer and return. 4779 // return vmovn_s32(vcvt_s32_f32(result)); 4780 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4781 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4782 return N0; 4783} 4784 4785static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4786 EVT VT = Op.getValueType(); 4787 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4788 "unexpected type for custom-lowering ISD::SDIV"); 4789 4790 DebugLoc dl = Op.getDebugLoc(); 4791 SDValue N0 = Op.getOperand(0); 4792 SDValue N1 = Op.getOperand(1); 4793 SDValue N2, N3; 4794 4795 if (VT == MVT::v8i8) { 4796 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4797 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4798 4799 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4800 DAG.getIntPtrConstant(4)); 4801 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4802 DAG.getIntPtrConstant(4)); 4803 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4804 DAG.getIntPtrConstant(0)); 4805 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4806 DAG.getIntPtrConstant(0)); 4807 4808 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4809 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4810 4811 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4812 N0 = LowerCONCAT_VECTORS(N0, DAG); 4813 4814 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4815 return N0; 4816 } 4817 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4818} 4819 4820static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4821 EVT VT = Op.getValueType(); 4822 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4823 "unexpected type for custom-lowering ISD::UDIV"); 4824 4825 DebugLoc dl = Op.getDebugLoc(); 4826 SDValue N0 = Op.getOperand(0); 4827 SDValue N1 = Op.getOperand(1); 4828 SDValue N2, N3; 4829 4830 if (VT == MVT::v8i8) { 4831 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4832 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4833 4834 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4835 DAG.getIntPtrConstant(4)); 4836 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4837 DAG.getIntPtrConstant(4)); 4838 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4839 DAG.getIntPtrConstant(0)); 4840 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4841 DAG.getIntPtrConstant(0)); 4842 4843 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4844 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4845 4846 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4847 N0 = LowerCONCAT_VECTORS(N0, DAG); 4848 4849 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4850 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4851 N0); 4852 return N0; 4853 } 4854 4855 // v4i16 sdiv ... Convert to float. 
4856  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4857  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4858  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4859  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4860  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4861  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4862
4863  // Use reciprocal estimate and two refinement steps.
4864  // float4 recip = vrecpeq_f32(yf);
4865  // recip *= vrecpsq_f32(yf, recip);
4866  // recip *= vrecpsq_f32(yf, recip);
4867  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4868                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
4869  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4870                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4871                   BN1, N2);
4872  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4873  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4874                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4875                   BN1, N2);
4876  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4877  // Simply multiplying by the reciprocal estimate can leave us a few ulps
4878  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4879  // and that it will never cause us to return an answer too large).
4880  // float4 result = as_float4(as_int4(xf*recip) + 2);
4881  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4882  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4883  N1 = DAG.getConstant(2, MVT::i32);
4884  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4885  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4886  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4887  // Convert back to integer and return.
4888  // return vmovn_u32(vcvt_s32_f32(result));
4889  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4890  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4891  return N0;
4892}
4893
4894static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
4895  EVT VT = Op.getNode()->getValueType(0);
4896  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4897
4898  unsigned Opc;
4899  bool ExtraOp = false;
4900  switch (Op.getOpcode()) {
4901  default: assert(0 && "Invalid code");
4902  case ISD::ADDC: Opc = ARMISD::ADDC; break;
4903  case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
4904  case ISD::SUBC: Opc = ARMISD::SUBC; break;
4905  case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
4906  }
4907
4908  if (!ExtraOp)
4909    return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4910                       Op.getOperand(1));
4911  return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4912                     Op.getOperand(1), Op.getOperand(2));
4913}
4914
4915static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
4916  // Monotonic load/store is legal for all targets
4917  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
4918    return Op;
4919
4920  // Acquire/Release load/store is not legal for targets without a
4921  // dmb or equivalent available.
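  // Returning a null SDValue here lets the legalizer fall back to its default
  // expansion for the atomic operation.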
4922  return SDValue();
4923}
4924
4925
4926static void
4927ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
4928                    SelectionDAG &DAG, unsigned NewOp) {
4929  DebugLoc dl = Node->getDebugLoc();
4930  assert (Node->getValueType(0) == MVT::i64 &&
4931          "Only know how to expand i64 atomics");
4932
4933  SmallVector<SDValue, 6> Ops;
4934  Ops.push_back(Node->getOperand(0)); // Chain
4935  Ops.push_back(Node->getOperand(1)); // Ptr
4936  // Low part of Val1
4937  Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4938                            Node->getOperand(2), DAG.getIntPtrConstant(0)));
4939  // High part of Val1
4940  Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4941                            Node->getOperand(2), DAG.getIntPtrConstant(1)));
4942  if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
4943    // Low part of Val2
4944    Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4945                              Node->getOperand(3), DAG.getIntPtrConstant(0)));
4946    // High part of Val2
4947    Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4948                              Node->getOperand(3), DAG.getIntPtrConstant(1)));
4949  }
4950  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4951  SDValue Result =
4952    DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
4953                            cast<MemSDNode>(Node)->getMemOperand());
4954  SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
4955  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
4956  Results.push_back(Result.getValue(2));
4957}
4958
4959SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4960  switch (Op.getOpcode()) {
4961  default: llvm_unreachable("Don't know how to custom lower this!");
4962  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
4963  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
4964  case ISD::GlobalAddress:
4965    return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4966 LowerGlobalAddressELF(Op, DAG); 4967 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4968 case ISD::SELECT: return LowerSELECT(Op, DAG); 4969 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4970 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4971 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4972 case ISD::VASTART: return LowerVASTART(Op, DAG); 4973 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4974 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 4975 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4976 case ISD::SINT_TO_FP: 4977 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4978 case ISD::FP_TO_SINT: 4979 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4980 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4981 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4982 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4983 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4984 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4985 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4986 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4987 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4988 Subtarget); 4989 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4990 case ISD::SHL: 4991 case ISD::SRL: 4992 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4993 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4994 case ISD::SRL_PARTS: 4995 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4996 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4997 case ISD::SETCC: return LowerVSETCC(Op, DAG); 4998 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4999 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5000 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5001 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5002 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 5003 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5004 case ISD::MUL: return LowerMUL(Op, DAG); 5005 case ISD::SDIV: return LowerSDIV(Op, DAG); 5006 case ISD::UDIV: return LowerUDIV(Op, DAG); 5007 case ISD::ADDC: 5008 case ISD::ADDE: 5009 case ISD::SUBC: 5010 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 5011 case ISD::ATOMIC_LOAD: 5012 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5013 } 5014 return SDValue(); 5015} 5016 5017/// ReplaceNodeResults - Replace the results of node with an illegal result 5018/// type with new values built out of custom code. 
5019void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5020 SmallVectorImpl<SDValue>&Results, 5021 SelectionDAG &DAG) const { 5022 SDValue Res; 5023 switch (N->getOpcode()) { 5024 default: 5025 llvm_unreachable("Don't know how to custom expand this!"); 5026 break; 5027 case ISD::BITCAST: 5028 Res = ExpandBITCAST(N, DAG); 5029 break; 5030 case ISD::SRL: 5031 case ISD::SRA: 5032 Res = Expand64BitShift(N, DAG, Subtarget); 5033 break; 5034 case ISD::ATOMIC_LOAD_ADD: 5035 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5036 return; 5037 case ISD::ATOMIC_LOAD_AND: 5038 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5039 return; 5040 case ISD::ATOMIC_LOAD_NAND: 5041 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5042 return; 5043 case ISD::ATOMIC_LOAD_OR: 5044 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5045 return; 5046 case ISD::ATOMIC_LOAD_SUB: 5047 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5048 return; 5049 case ISD::ATOMIC_LOAD_XOR: 5050 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5051 return; 5052 case ISD::ATOMIC_SWAP: 5053 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5054 return; 5055 case ISD::ATOMIC_CMP_SWAP: 5056 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5057 return; 5058 } 5059 if (Res.getNode()) 5060 Results.push_back(Res); 5061} 5062 5063//===----------------------------------------------------------------------===// 5064// ARM Scheduler Hooks 5065//===----------------------------------------------------------------------===// 5066 5067MachineBasicBlock * 5068ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5069 MachineBasicBlock *BB, 5070 unsigned Size) const { 5071 unsigned dest = MI->getOperand(0).getReg(); 5072 unsigned ptr = MI->getOperand(1).getReg(); 5073 unsigned oldval = MI->getOperand(2).getReg(); 5074 unsigned newval = MI->getOperand(3).getReg(); 5075 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5076 DebugLoc dl = MI->getDebugLoc(); 5077 bool isThumb2 = Subtarget->isThumb2(); 5078 5079 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5080 unsigned scratch = 5081 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 5082 : ARM::GPRRegisterClass); 5083 5084 if (isThumb2) { 5085 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5086 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 5087 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 5088 } 5089 5090 unsigned ldrOpc, strOpc; 5091 switch (Size) { 5092 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5093 case 1: 5094 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5095 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5096 break; 5097 case 2: 5098 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5099 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5100 break; 5101 case 4: 5102 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5103 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5104 break; 5105 } 5106 5107 MachineFunction *MF = BB->getParent(); 5108 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5109 MachineFunction::iterator It = BB; 5110 ++It; // insert the new blocks after the current block 5111 5112 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5113 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5114 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5115 MF->insert(It, loop1MBB); 5116 MF->insert(It, loop2MBB); 5117 MF->insert(It, exitMBB); 5118 5119 // Transfer the remainder of BB and its successor edges to exitMBB. 5120 exitMBB->splice(exitMBB->begin(), BB, 5121 llvm::next(MachineBasicBlock::iterator(MI)), 5122 BB->end()); 5123 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5124 5125 // thisMBB: 5126 // ... 5127 // fallthrough --> loop1MBB 5128 BB->addSuccessor(loop1MBB); 5129 5130 // loop1MBB: 5131 // ldrex dest, [ptr] 5132 // cmp dest, oldval 5133 // bne exitMBB 5134 BB = loop1MBB; 5135 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5136 if (ldrOpc == ARM::t2LDREX) 5137 MIB.addImm(0); 5138 AddDefaultPred(MIB); 5139 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5140 .addReg(dest).addReg(oldval)); 5141 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5142 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5143 BB->addSuccessor(loop2MBB); 5144 BB->addSuccessor(exitMBB); 5145 5146 // loop2MBB: 5147 // strex scratch, newval, [ptr] 5148 // cmp scratch, #0 5149 // bne loop1MBB 5150 BB = loop2MBB; 5151 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5152 if (strOpc == ARM::t2STREX) 5153 MIB.addImm(0); 5154 AddDefaultPred(MIB); 5155 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5156 .addReg(scratch).addImm(0)); 5157 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5158 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5159 BB->addSuccessor(loop1MBB); 5160 BB->addSuccessor(exitMBB); 5161 5162 // exitMBB: 5163 // ... 5164 BB = exitMBB; 5165 5166 MI->eraseFromParent(); // The instruction is gone now. 5167 5168 return BB; 5169} 5170 5171MachineBasicBlock * 5172ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5173 unsigned Size, unsigned BinOpcode) const { 5174 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5175 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5176 5177 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5178 MachineFunction *MF = BB->getParent(); 5179 MachineFunction::iterator It = BB; 5180 ++It; 5181 5182 unsigned dest = MI->getOperand(0).getReg(); 5183 unsigned ptr = MI->getOperand(1).getReg(); 5184 unsigned incr = MI->getOperand(2).getReg(); 5185 DebugLoc dl = MI->getDebugLoc(); 5186 bool isThumb2 = Subtarget->isThumb2(); 5187 5188 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5189 if (isThumb2) { 5190 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5191 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5192 } 5193 5194 unsigned ldrOpc, strOpc; 5195 switch (Size) { 5196 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5197 case 1: 5198 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5199 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5200 break; 5201 case 2: 5202 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5203 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5204 break; 5205 case 4: 5206 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5207 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5208 break; 5209 } 5210 5211 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5212 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5213 MF->insert(It, loopMBB); 5214 MF->insert(It, exitMBB); 5215 5216 // Transfer the remainder of BB and its successor edges to exitMBB. 5217 exitMBB->splice(exitMBB->begin(), BB, 5218 llvm::next(MachineBasicBlock::iterator(MI)), 5219 BB->end()); 5220 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5221 5222 TargetRegisterClass *TRC = 5223 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5224 unsigned scratch = MRI.createVirtualRegister(TRC); 5225 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5226 5227 // thisMBB: 5228 // ... 5229 // fallthrough --> loopMBB 5230 BB->addSuccessor(loopMBB); 5231 5232 // loopMBB: 5233 // ldrex dest, ptr 5234 // <binop> scratch2, dest, incr 5235 // strex scratch, scratch2, ptr 5236 // cmp scratch, #0 5237 // bne- loopMBB 5238 // fallthrough --> exitMBB 5239 BB = loopMBB; 5240 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5241 if (ldrOpc == ARM::t2LDREX) 5242 MIB.addImm(0); 5243 AddDefaultPred(MIB); 5244 if (BinOpcode) { 5245 // operand order needs to go the other way for NAND 5246 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5247 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5248 addReg(incr).addReg(dest)).addReg(0); 5249 else 5250 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5251 addReg(dest).addReg(incr)).addReg(0); 5252 } 5253 5254 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5255 if (strOpc == ARM::t2STREX) 5256 MIB.addImm(0); 5257 AddDefaultPred(MIB); 5258 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5259 .addReg(scratch).addImm(0)); 5260 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5261 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5262 5263 BB->addSuccessor(loopMBB); 5264 BB->addSuccessor(exitMBB); 5265 5266 // exitMBB: 5267 // ... 5268 BB = exitMBB; 5269 5270 MI->eraseFromParent(); // The instruction is gone now. 5271 5272 return BB; 5273} 5274 5275MachineBasicBlock * 5276ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5277 MachineBasicBlock *BB, 5278 unsigned Size, 5279 bool signExtend, 5280 ARMCC::CondCodes Cond) const { 5281 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5282 5283 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5284 MachineFunction *MF = BB->getParent(); 5285 MachineFunction::iterator It = BB; 5286 ++It; 5287 5288 unsigned dest = MI->getOperand(0).getReg(); 5289 unsigned ptr = MI->getOperand(1).getReg(); 5290 unsigned incr = MI->getOperand(2).getReg(); 5291 unsigned oldval = dest; 5292 DebugLoc dl = MI->getDebugLoc(); 5293 bool isThumb2 = Subtarget->isThumb2(); 5294 5295 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5296 if (isThumb2) { 5297 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5298 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5299 } 5300 5301 unsigned ldrOpc, strOpc, extendOpc; 5302 switch (Size) { 5303 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5304 case 1: 5305 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5306 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5307 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 5308 break; 5309 case 2: 5310 ldrOpc = isThumb2 ? 
ARM::t2LDREXH : ARM::LDREXH; 5311 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5312 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5313 break; 5314 case 4: 5315 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5316 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5317 extendOpc = 0; 5318 break; 5319 } 5320 5321 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5322 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5323 MF->insert(It, loopMBB); 5324 MF->insert(It, exitMBB); 5325 5326 // Transfer the remainder of BB and its successor edges to exitMBB. 5327 exitMBB->splice(exitMBB->begin(), BB, 5328 llvm::next(MachineBasicBlock::iterator(MI)), 5329 BB->end()); 5330 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5331 5332 TargetRegisterClass *TRC = 5333 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5334 unsigned scratch = MRI.createVirtualRegister(TRC); 5335 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5336 5337 // thisMBB: 5338 // ... 5339 // fallthrough --> loopMBB 5340 BB->addSuccessor(loopMBB); 5341 5342 // loopMBB: 5343 // ldrex dest, ptr 5344 // (sign extend dest, if required) 5345 // cmp dest, incr 5346 // cmov.cond scratch2, dest, incr 5347 // strex scratch, scratch2, ptr 5348 // cmp scratch, #0 5349 // bne- loopMBB 5350 // fallthrough --> exitMBB 5351 BB = loopMBB; 5352 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5353 if (ldrOpc == ARM::t2LDREX) 5354 MIB.addImm(0); 5355 AddDefaultPred(MIB); 5356 5357 // Sign extend the value, if necessary. 5358 if (signExtend && extendOpc) { 5359 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5360 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5361 .addReg(dest) 5362 .addImm(0)); 5363 } 5364 5365 // Build compare and cmov instructions. 5366 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5367 .addReg(oldval).addReg(incr)); 5368 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5369 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5370 5371 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5372 if (strOpc == ARM::t2STREX) 5373 MIB.addImm(0); 5374 AddDefaultPred(MIB); 5375 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5376 .addReg(scratch).addImm(0)); 5377 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5378 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5379 5380 BB->addSuccessor(loopMBB); 5381 BB->addSuccessor(exitMBB); 5382 5383 // exitMBB: 5384 // ... 5385 BB = exitMBB; 5386 5387 MI->eraseFromParent(); // The instruction is gone now. 5388 5389 return BB; 5390} 5391 5392MachineBasicBlock * 5393ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5394 unsigned Op1, unsigned Op2, 5395 bool NeedsCarry, bool IsCmpxchg) const { 5396 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
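  // The 64-bit value is handled with an ldrexd/strexd loop; for cmpxchg, both
  // halves of the loaded value are compared with the expected value before the
  // store is attempted.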
5397 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5398 5399 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5400 MachineFunction *MF = BB->getParent(); 5401 MachineFunction::iterator It = BB; 5402 ++It; 5403 5404 unsigned destlo = MI->getOperand(0).getReg(); 5405 unsigned desthi = MI->getOperand(1).getReg(); 5406 unsigned ptr = MI->getOperand(2).getReg(); 5407 unsigned vallo = MI->getOperand(3).getReg(); 5408 unsigned valhi = MI->getOperand(4).getReg(); 5409 DebugLoc dl = MI->getDebugLoc(); 5410 bool isThumb2 = Subtarget->isThumb2(); 5411 5412 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5413 if (isThumb2) { 5414 MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); 5415 MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); 5416 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5417 } 5418 5419 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5420 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5421 5422 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5423 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5424 if (IsCmpxchg) { 5425 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5426 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5427 } 5428 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5429 MF->insert(It, loopMBB); 5430 if (IsCmpxchg) { 5431 MF->insert(It, contBB); 5432 MF->insert(It, cont2BB); 5433 } 5434 MF->insert(It, exitMBB); 5435 5436 // Transfer the remainder of BB and its successor edges to exitMBB. 5437 exitMBB->splice(exitMBB->begin(), BB, 5438 llvm::next(MachineBasicBlock::iterator(MI)), 5439 BB->end()); 5440 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5441 5442 TargetRegisterClass *TRC = 5443 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5444 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5445 5446 // thisMBB: 5447 // ... 5448 // fallthrough --> loopMBB 5449 BB->addSuccessor(loopMBB); 5450 5451 // loopMBB: 5452 // ldrexd r2, r3, ptr 5453 // <binopa> r0, r2, incr 5454 // <binopb> r1, r3, incr 5455 // strexd storesuccess, r0, r1, ptr 5456 // cmp storesuccess, #0 5457 // bne- loopMBB 5458 // fallthrough --> exitMBB 5459 // 5460 // Note that the registers are explicitly specified because there is not any 5461 // way to force the register allocator to allocate a register pair. 5462 // 5463 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5464 // need to properly enforce the restriction that the two output registers 5465 // for ldrexd must be different. 5466 BB = loopMBB; 5467 // Load 5468 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5469 .addReg(ARM::R2, RegState::Define) 5470 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5471 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5472 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5473 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5474 5475 if (IsCmpxchg) { 5476 // Add early exit 5477 for (unsigned i = 0; i < 2; i++) { 5478 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5479 ARM::CMPrr)) 5480 .addReg(i == 0 ? destlo : desthi) 5481 .addReg(i == 0 ? vallo : valhi)); 5482 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5483 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5484 BB->addSuccessor(exitMBB); 5485 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5486 BB = (i == 0 ? 
contBB : cont2BB); 5487 } 5488 5489 // Copy to physregs for strexd 5490 unsigned setlo = MI->getOperand(5).getReg(); 5491 unsigned sethi = MI->getOperand(6).getReg(); 5492 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5493 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5494 } else if (Op1) { 5495 // Perform binary operation 5496 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5497 .addReg(destlo).addReg(vallo)) 5498 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5499 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5500 .addReg(desthi).addReg(valhi)).addReg(0); 5501 } else { 5502 // Copy to physregs for strexd 5503 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5504 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5505 } 5506 5507 // Store 5508 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5509 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5510 // Cmp+jump 5511 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5512 .addReg(storesuccess).addImm(0)); 5513 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5514 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5515 5516 BB->addSuccessor(loopMBB); 5517 BB->addSuccessor(exitMBB); 5518 5519 // exitMBB: 5520 // ... 5521 BB = exitMBB; 5522 5523 MI->eraseFromParent(); // The instruction is gone now. 5524 5525 return BB; 5526} 5527 5528/// EmitBasePointerRecalculation - For functions using a base pointer, we 5529/// rematerialize it (via the frame pointer). 5530void ARMTargetLowering:: 5531EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB, 5532 MachineBasicBlock *DispatchBB) const { 5533 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5534 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 5535 MachineFunction &MF = *MI->getParent()->getParent(); 5536 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 5537 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 5538 5539 if (!RI.hasBasePointer(MF)) return; 5540 5541 MachineBasicBlock::iterator MBBI = MI; 5542 5543 int32_t NumBytes = AFI->getFramePtrSpillOffset(); 5544 unsigned FramePtr = RI.getFrameRegister(MF); 5545 assert(MF.getTarget().getFrameLowering()->hasFP(MF) && 5546 "Base pointer without frame pointer?"); 5547 5548 if (AFI->isThumb2Function()) 5549 llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5550 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5551 else if (AFI->isThumbFunction()) 5552 llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5553 FramePtr, -NumBytes, *AII, RI); 5554 else 5555 llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5556 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5557 5558 if (!RI.needsStackRealignment(MF)) return; 5559 5560 // If there's dynamic realignment, adjust for it. 5561 MachineFrameInfo *MFI = MF.getFrameInfo(); 5562 unsigned MaxAlign = MFI->getMaxAlignment(); 5563 assert(!AFI->isThumb1OnlyFunction()); 5564 5565 // Emit bic r6, r6, MaxAlign 5566 unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri; 5567 AddDefaultCC( 5568 AddDefaultPred( 5569 BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6) 5570 .addReg(ARM::R6, RegState::Kill) 5571 .addImm(MaxAlign - 1))); 5572} 5573 5574/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5575/// registers the function context. 
5576void ARMTargetLowering:: 5577SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5578 MachineBasicBlock *DispatchBB, int FI) const { 5579 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5580 DebugLoc dl = MI->getDebugLoc(); 5581 MachineFunction *MF = MBB->getParent(); 5582 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5583 MachineConstantPool *MCP = MF->getConstantPool(); 5584 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5585 const Function *F = MF->getFunction(); 5586 5587 bool isThumb = Subtarget->isThumb(); 5588 bool isThumb2 = Subtarget->isThumb2(); 5589 5590 unsigned PCLabelId = AFI->createPICLabelUId(); 5591 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5592 ARMConstantPoolValue *CPV = 5593 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5594 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5595 5596 const TargetRegisterClass *TRC = 5597 isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5598 5599 // Grab constant pool and fixed stack memory operands. 5600 MachineMemOperand *CPMMO = 5601 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5602 MachineMemOperand::MOLoad, 4, 4); 5603 5604 MachineMemOperand *FIMMOSt = 5605 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5606 MachineMemOperand::MOStore, 4, 4); 5607 5608 EmitBasePointerRecalculation(MI, MBB, DispatchBB); 5609 5610 // Load the address of the dispatch MBB into the jump buffer. 5611 if (isThumb2) { 5612 // Incoming value: jbuf 5613 // ldr.n r5, LCPI1_1 5614 // orr r5, r5, #1 5615 // add r5, pc 5616 // str r5, [$jbuf, #+4] ; &jbuf[1] 5617 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5618 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 5619 .addConstantPoolIndex(CPI) 5620 .addMemOperand(CPMMO)); 5621 // Set the low bit because of thumb mode. 5622 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5623 AddDefaultCC( 5624 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 5625 .addReg(NewVReg1, RegState::Kill) 5626 .addImm(0x01))); 5627 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5628 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 5629 .addReg(NewVReg2, RegState::Kill) 5630 .addImm(PCLabelId); 5631 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 5632 .addReg(NewVReg3, RegState::Kill) 5633 .addFrameIndex(FI) 5634 .addImm(36) // &jbuf[1] :: pc 5635 .addMemOperand(FIMMOSt)); 5636 } else if (isThumb) { 5637 // Incoming value: jbuf 5638 // ldr.n r1, LCPI1_4 5639 // add r1, pc 5640 // mov r2, #1 5641 // orrs r1, r2 5642 // add r2, $jbuf, #+4 ; &jbuf[1] 5643 // str r1, [r2] 5644 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5645 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 5646 .addConstantPoolIndex(CPI) 5647 .addMemOperand(CPMMO)); 5648 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5649 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 5650 .addReg(NewVReg1, RegState::Kill) 5651 .addImm(PCLabelId); 5652 // Set the low bit because of thumb mode. 
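    // Bit 0 of the stored address must be set so that the SjLj longjmp
    // resumes execution in Thumb state.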
5653 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5654 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 5655 .addReg(ARM::CPSR, RegState::Define) 5656 .addImm(1)); 5657 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5658 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 5659 .addReg(ARM::CPSR, RegState::Define) 5660 .addReg(NewVReg2, RegState::Kill) 5661 .addReg(NewVReg3, RegState::Kill)); 5662 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5663 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 5664 .addFrameIndex(FI) 5665 .addImm(36)); // &jbuf[1] :: pc 5666 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 5667 .addReg(NewVReg4, RegState::Kill) 5668 .addReg(NewVReg5, RegState::Kill) 5669 .addImm(0) 5670 .addMemOperand(FIMMOSt)); 5671 } else { 5672 // Incoming value: jbuf 5673 // ldr r1, LCPI1_1 5674 // add r1, pc, r1 5675 // str r1, [$jbuf, #+4] ; &jbuf[1] 5676 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5677 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 5678 .addConstantPoolIndex(CPI) 5679 .addImm(0) 5680 .addMemOperand(CPMMO)); 5681 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5682 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 5683 .addReg(NewVReg1, RegState::Kill) 5684 .addImm(PCLabelId)); 5685 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 5686 .addReg(NewVReg2, RegState::Kill) 5687 .addFrameIndex(FI) 5688 .addImm(36) // &jbuf[1] :: pc 5689 .addMemOperand(FIMMOSt)); 5690 } 5691} 5692 5693MachineBasicBlock *ARMTargetLowering:: 5694EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 5695 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5696 DebugLoc dl = MI->getDebugLoc(); 5697 MachineFunction *MF = MBB->getParent(); 5698 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5699 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5700 MachineFrameInfo *MFI = MF->getFrameInfo(); 5701 int FI = MFI->getFunctionContextIndex(); 5702 5703 const TargetRegisterClass *TRC = 5704 Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5705 5706 // Get a mapping of the call site numbers to all of the landing pads they're 5707 // associated with. 5708 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 5709 unsigned MaxCSNum = 0; 5710 MachineModuleInfo &MMI = MF->getMMI(); 5711 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) { 5712 if (!BB->isLandingPad()) continue; 5713 5714 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 5715 // pad. 5716 for (MachineBasicBlock::iterator 5717 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 5718 if (!II->isEHLabel()) continue; 5719 5720 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 5721 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 5722 5723 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 5724 for (SmallVectorImpl<unsigned>::iterator 5725 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 5726 CSI != CSE; ++CSI) { 5727 CallSiteNumToLPad[*CSI].push_back(BB); 5728 MaxCSNum = std::max(MaxCSNum, *CSI); 5729 } 5730 break; 5731 } 5732 } 5733 5734 // Get an ordered list of the machine basic blocks for the jump table. 
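  // Call site indices are 1-based, so walking from 1 to MaxCSNum below keeps
  // the landing pads in call-site order.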
5735 std::vector<MachineBasicBlock*> LPadList; 5736 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 5737 LPadList.reserve(CallSiteNumToLPad.size()); 5738 for (unsigned I = 1; I <= MaxCSNum; ++I) { 5739 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 5740 for (SmallVectorImpl<MachineBasicBlock*>::iterator 5741 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 5742 LPadList.push_back(*II); 5743 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 5744 } 5745 } 5746 5747 assert(!LPadList.empty() && 5748 "No landing pad destinations for the dispatch jump table!"); 5749 5750 // Create the jump table and associated information. 5751 MachineJumpTableInfo *JTI = 5752 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 5753 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 5754 unsigned UId = AFI->createJumpTableUId(); 5755 5756 // Create the MBBs for the dispatch code. 5757 5758 // Shove the dispatch's address into the return slot in the function context. 5759 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 5760 DispatchBB->setIsLandingPad(); 5761 5762 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 5763 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 5764 DispatchBB->addSuccessor(TrapBB); 5765 5766 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 5767 DispatchBB->addSuccessor(DispContBB); 5768 5769 // Insert and MBBs. 5770 MF->insert(MF->end(), DispatchBB); 5771 MF->insert(MF->end(), DispContBB); 5772 MF->insert(MF->end(), TrapBB); 5773 5774 // Insert code into the entry block that creates and registers the function 5775 // context. 5776 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 5777 5778 MachineMemOperand *FIMMOLd = 5779 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5780 MachineMemOperand::MOLoad | 5781 MachineMemOperand::MOVolatile, 4, 4); 5782 5783 unsigned NumLPads = LPadList.size(); 5784 if (Subtarget->isThumb2()) { 5785 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5786 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 5787 .addFrameIndex(FI) 5788 .addImm(4) 5789 .addMemOperand(FIMMOLd)); 5790 5791 if (NumLPads < 256) { 5792 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 5793 .addReg(NewVReg1) 5794 .addImm(LPadList.size())); 5795 } else { 5796 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5797 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 5798 .addImm(NumLPads & 0xFFFF)); 5799 5800 unsigned VReg2 = VReg1; 5801 if ((NumLPads & 0xFFFF0000) != 0) { 5802 VReg2 = MRI->createVirtualRegister(TRC); 5803 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 5804 .addReg(VReg1) 5805 .addImm(NumLPads >> 16)); 5806 } 5807 5808 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 5809 .addReg(NewVReg1) 5810 .addReg(VReg2)); 5811 } 5812 5813 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 5814 .addMBB(TrapBB) 5815 .addImm(ARMCC::HI) 5816 .addReg(ARM::CPSR); 5817 5818 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5819 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 5820 .addJumpTableIndex(MJTI) 5821 .addImm(UId)); 5822 5823 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5824 AddDefaultCC( 5825 AddDefaultPred( 5826 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 5827 .addReg(NewVReg3, RegState::Kill) 5828 .addReg(NewVReg1) 5829 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5830 5831 BuildMI(DispContBB, dl, 
TII->get(ARM::t2BR_JT)) 5832 .addReg(NewVReg4, RegState::Kill) 5833 .addReg(NewVReg1) 5834 .addJumpTableIndex(MJTI) 5835 .addImm(UId); 5836 } else if (Subtarget->isThumb()) { 5837 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5838 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 5839 .addFrameIndex(FI) 5840 .addImm(1) 5841 .addMemOperand(FIMMOLd)); 5842 5843 if (NumLPads < 256) { 5844 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 5845 .addReg(NewVReg1) 5846 .addImm(NumLPads)); 5847 } else { 5848 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5849 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5850 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5851 5852 // MachineConstantPool wants an explicit alignment. 5853 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5854 if (Align == 0) 5855 Align = getTargetData()->getTypeAllocSize(C->getType()); 5856 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5857 5858 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5859 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 5860 .addReg(VReg1, RegState::Define) 5861 .addConstantPoolIndex(Idx)); 5862 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 5863 .addReg(NewVReg1) 5864 .addReg(VReg1)); 5865 } 5866 5867 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 5868 .addMBB(TrapBB) 5869 .addImm(ARMCC::HI) 5870 .addReg(ARM::CPSR); 5871 5872 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5873 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 5874 .addReg(ARM::CPSR, RegState::Define) 5875 .addReg(NewVReg1) 5876 .addImm(2)); 5877 5878 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5879 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 5880 .addJumpTableIndex(MJTI) 5881 .addImm(UId)); 5882 5883 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5884 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 5885 .addReg(ARM::CPSR, RegState::Define) 5886 .addReg(NewVReg2, RegState::Kill) 5887 .addReg(NewVReg3)); 5888 5889 MachineMemOperand *JTMMOLd = 5890 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5891 MachineMemOperand::MOLoad, 4, 4); 5892 5893 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5894 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 5895 .addReg(NewVReg4, RegState::Kill) 5896 .addImm(0) 5897 .addMemOperand(JTMMOLd)); 5898 5899 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 5900 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 5901 .addReg(ARM::CPSR, RegState::Define) 5902 .addReg(NewVReg5, RegState::Kill) 5903 .addReg(NewVReg3)); 5904 5905 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 5906 .addReg(NewVReg6, RegState::Kill) 5907 .addJumpTableIndex(MJTI) 5908 .addImm(UId); 5909 } else { 5910 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5911 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 5912 .addFrameIndex(FI) 5913 .addImm(4) 5914 .addMemOperand(FIMMOLd)); 5915 5916 if (NumLPads < 256) { 5917 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 5918 .addReg(NewVReg1) 5919 .addImm(NumLPads)); 5920 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 5921 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5922 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 5923 .addImm(NumLPads & 0xFFFF)); 5924 5925 unsigned VReg2 = VReg1; 5926 if ((NumLPads & 0xFFFF0000) 
!= 0) { 5927 VReg2 = MRI->createVirtualRegister(TRC); 5928 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 5929 .addReg(VReg1) 5930 .addImm(NumLPads >> 16)); 5931 } 5932 5933 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5934 .addReg(NewVReg1) 5935 .addReg(VReg2)); 5936 } else { 5937 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5938 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5939 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5940 5941 // MachineConstantPool wants an explicit alignment. 5942 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5943 if (Align == 0) 5944 Align = getTargetData()->getTypeAllocSize(C->getType()); 5945 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5946 5947 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5948 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 5949 .addReg(VReg1, RegState::Define) 5950 .addConstantPoolIndex(Idx) 5951 .addImm(0)); 5952 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5953 .addReg(NewVReg1) 5954 .addReg(VReg1, RegState::Kill)); 5955 } 5956 5957 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 5958 .addMBB(TrapBB) 5959 .addImm(ARMCC::HI) 5960 .addReg(ARM::CPSR); 5961 5962 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5963 AddDefaultCC( 5964 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 5965 .addReg(NewVReg1) 5966 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5967 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5968 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 5969 .addJumpTableIndex(MJTI) 5970 .addImm(UId)); 5971 5972 MachineMemOperand *JTMMOLd = 5973 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5974 MachineMemOperand::MOLoad, 4, 4); 5975 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5976 AddDefaultPred( 5977 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 5978 .addReg(NewVReg3, RegState::Kill) 5979 .addReg(NewVReg4) 5980 .addImm(0) 5981 .addMemOperand(JTMMOLd)); 5982 5983 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 5984 .addReg(NewVReg5, RegState::Kill) 5985 .addReg(NewVReg4) 5986 .addJumpTableIndex(MJTI) 5987 .addImm(UId); 5988 } 5989 5990 // Add the jump table entries as successors to the MBB. 5991 MachineBasicBlock *PrevMBB = 0; 5992 for (std::vector<MachineBasicBlock*>::iterator 5993 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 5994 MachineBasicBlock *CurMBB = *I; 5995 if (PrevMBB != CurMBB) 5996 DispContBB->addSuccessor(CurMBB); 5997 PrevMBB = CurMBB; 5998 } 5999 6000 // N.B. the order the invoke BBs are processed in doesn't matter here. 6001 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6002 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6003 const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF); 6004 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6005 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6006 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6007 MachineBasicBlock *BB = *I; 6008 6009 // Remove the landing pad successor from the invoke block and replace it 6010 // with the new dispatch block. 
6011 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6012 BB->succ_end()); 6013 while (!Successors.empty()) { 6014 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6015 if (SMBB->isLandingPad()) { 6016 BB->removeSuccessor(SMBB); 6017 MBBLPads.push_back(SMBB); 6018 } 6019 } 6020 6021 BB->addSuccessor(DispatchBB); 6022 6023 // Find the invoke call and mark all of the callee-saved registers as 6024 // 'implicit defined' so that they're spilled. This prevents code from 6025 // moving instructions to before the EH block, where they will never be 6026 // executed. 6027 for (MachineBasicBlock::reverse_iterator 6028 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6029 if (!II->getDesc().isCall()) continue; 6030 6031 DenseMap<unsigned, bool> DefRegs; 6032 for (MachineInstr::mop_iterator 6033 OI = II->operands_begin(), OE = II->operands_end(); 6034 OI != OE; ++OI) { 6035 if (!OI->isReg()) continue; 6036 DefRegs[OI->getReg()] = true; 6037 } 6038 6039 MachineInstrBuilder MIB(&*II); 6040 6041 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6042 unsigned Reg = SavedRegs[i]; 6043 if (Subtarget->isThumb2() && 6044 !ARM::tGPRRegisterClass->contains(Reg) && 6045 !ARM::hGPRRegisterClass->contains(Reg)) 6046 continue; 6047 else if (Subtarget->isThumb1Only() && 6048 !ARM::tGPRRegisterClass->contains(Reg)) 6049 continue; 6050 else if (!Subtarget->isThumb() && 6051 !ARM::GPRRegisterClass->contains(Reg)) 6052 continue; 6053 if (!DefRegs[Reg]) 6054 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6055 } 6056 6057 break; 6058 } 6059 } 6060 6061 // Mark all former landing pads as non-landing pads. The dispatch is the only 6062 // landing pad now. 6063 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6064 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6065 (*I)->setIsLandingPad(false); 6066 6067 // The instruction is gone now. 6068 MI->eraseFromParent(); 6069 6070 return MBB; 6071} 6072 6073static 6074MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6075 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6076 E = MBB->succ_end(); I != E; ++I) 6077 if (*I != Succ) 6078 return *I; 6079 llvm_unreachable("Expecting a BB with two successors!"); 6080} 6081 6082MachineBasicBlock * 6083ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6084 MachineBasicBlock *BB) const { 6085 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6086 DebugLoc dl = MI->getDebugLoc(); 6087 bool isThumb2 = Subtarget->isThumb2(); 6088 switch (MI->getOpcode()) { 6089 default: { 6090 MI->dump(); 6091 llvm_unreachable("Unexpected instr type to insert"); 6092 } 6093 // The Thumb2 pre-indexed stores have the same MI operands, they just 6094 // define them differently in the .td files from the isel patterns, so 6095 // they need pseudos. 6096 case ARM::t2STR_preidx: 6097 MI->setDesc(TII->get(ARM::t2STR_PRE)); 6098 return BB; 6099 case ARM::t2STRB_preidx: 6100 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 6101 return BB; 6102 case ARM::t2STRH_preidx: 6103 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 6104 return BB; 6105 6106 case ARM::STRi_preidx: 6107 case ARM::STRBi_preidx: { 6108 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 6109 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 6110 // Decode the offset. 
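// [Editor's note: illustrative example; the concrete operands are made up.]
// A pre-indexed store such as "str r0, [r1, #-8]!" arrives here with the
// add/sub flag and the magnitude 8 packed into a single addrmode2 immediate;
// getAM2Op/getAM2Offset unpack it, the offset is negated for the 'sub' form,
// and the resulting signed immediate is what STR_PRE_IMM / STRB_PRE_IMM
// expect.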
6111 unsigned Offset = MI->getOperand(4).getImm(); 6112 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 6113 Offset = ARM_AM::getAM2Offset(Offset); 6114 if (isSub) 6115 Offset = -Offset; 6116 6117 MachineMemOperand *MMO = *MI->memoperands_begin(); 6118 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 6119 .addOperand(MI->getOperand(0)) // Rn_wb 6120 .addOperand(MI->getOperand(1)) // Rt 6121 .addOperand(MI->getOperand(2)) // Rn 6122 .addImm(Offset) // offset (skip GPR==zero_reg) 6123 .addOperand(MI->getOperand(5)) // pred 6124 .addOperand(MI->getOperand(6)) 6125 .addMemOperand(MMO); 6126 MI->eraseFromParent(); 6127 return BB; 6128 } 6129 case ARM::STRr_preidx: 6130 case ARM::STRBr_preidx: 6131 case ARM::STRH_preidx: { 6132 unsigned NewOpc; 6133 switch (MI->getOpcode()) { 6134 default: llvm_unreachable("unexpected opcode!"); 6135 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 6136 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 6137 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 6138 } 6139 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 6140 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 6141 MIB.addOperand(MI->getOperand(i)); 6142 MI->eraseFromParent(); 6143 return BB; 6144 } 6145 case ARM::ATOMIC_LOAD_ADD_I8: 6146 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6147 case ARM::ATOMIC_LOAD_ADD_I16: 6148 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6149 case ARM::ATOMIC_LOAD_ADD_I32: 6150 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6151 6152 case ARM::ATOMIC_LOAD_AND_I8: 6153 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6154 case ARM::ATOMIC_LOAD_AND_I16: 6155 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6156 case ARM::ATOMIC_LOAD_AND_I32: 6157 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6158 6159 case ARM::ATOMIC_LOAD_OR_I8: 6160 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6161 case ARM::ATOMIC_LOAD_OR_I16: 6162 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6163 case ARM::ATOMIC_LOAD_OR_I32: 6164 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6165 6166 case ARM::ATOMIC_LOAD_XOR_I8: 6167 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6168 case ARM::ATOMIC_LOAD_XOR_I16: 6169 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6170 case ARM::ATOMIC_LOAD_XOR_I32: 6171 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6172 6173 case ARM::ATOMIC_LOAD_NAND_I8: 6174 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6175 case ARM::ATOMIC_LOAD_NAND_I16: 6176 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6177 case ARM::ATOMIC_LOAD_NAND_I32: 6178 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6179 6180 case ARM::ATOMIC_LOAD_SUB_I8: 6181 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6182 case ARM::ATOMIC_LOAD_SUB_I16: 6183 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6184 case ARM::ATOMIC_LOAD_SUB_I32: 6185 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 6186 6187 case ARM::ATOMIC_LOAD_MIN_I8: 6188 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 6189 case ARM::ATOMIC_LOAD_MIN_I16: 6190 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 6191 case ARM::ATOMIC_LOAD_MIN_I32: 6192 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 6193 6194 case ARM::ATOMIC_LOAD_MAX_I8: 6195 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 6196 case ARM::ATOMIC_LOAD_MAX_I16: 6197 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 6198 case ARM::ATOMIC_LOAD_MAX_I32: 6199 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 6200 6201 case ARM::ATOMIC_LOAD_UMIN_I8: 6202 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 6203 case ARM::ATOMIC_LOAD_UMIN_I16: 6204 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 6205 case ARM::ATOMIC_LOAD_UMIN_I32: 6206 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 6207 6208 case ARM::ATOMIC_LOAD_UMAX_I8: 6209 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 6210 case ARM::ATOMIC_LOAD_UMAX_I16: 6211 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 6212 case ARM::ATOMIC_LOAD_UMAX_I32: 6213 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 6214 6215 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 6216 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 6217 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 6218 6219 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 6220 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 6221 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 6222 6223 6224 case ARM::ATOMADD6432: 6225 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 6226 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 6227 /*NeedsCarry*/ true); 6228 case ARM::ATOMSUB6432: 6229 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6230 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6231 /*NeedsCarry*/ true); 6232 case ARM::ATOMOR6432: 6233 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 6234 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6235 case ARM::ATOMXOR6432: 6236 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 6237 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6238 case ARM::ATOMAND6432: 6239 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 6240 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6241 case ARM::ATOMSWAP6432: 6242 return EmitAtomicBinary64(MI, BB, 0, 0, false); 6243 case ARM::ATOMCMPXCHG6432: 6244 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6245 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6246 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6247 6248 case ARM::tMOVCCr_pseudo: { 6249 // To "insert" a SELECT_CC instruction, we actually have to insert the 6250 // diamond control-flow pattern. The incoming instruction knows the 6251 // destination vreg to set, the condition code register to branch on, the 6252 // true/false values to select between, and a branch opcode to use. 6253 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6254 MachineFunction::iterator It = BB; 6255 ++It; 6256 6257 // thisMBB: 6258 // ... 6259 // TrueVal = ... 
6260 // cmpTY ccX, r1, r2 6261 // bCC copy1MBB 6262 // fallthrough --> copy0MBB 6263 MachineBasicBlock *thisMBB = BB; 6264 MachineFunction *F = BB->getParent(); 6265 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6266 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6267 F->insert(It, copy0MBB); 6268 F->insert(It, sinkMBB); 6269 6270 // Transfer the remainder of BB and its successor edges to sinkMBB. 6271 sinkMBB->splice(sinkMBB->begin(), BB, 6272 llvm::next(MachineBasicBlock::iterator(MI)), 6273 BB->end()); 6274 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6275 6276 BB->addSuccessor(copy0MBB); 6277 BB->addSuccessor(sinkMBB); 6278 6279 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6280 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6281 6282 // copy0MBB: 6283 // %FalseValue = ... 6284 // # fallthrough to sinkMBB 6285 BB = copy0MBB; 6286 6287 // Update machine-CFG edges 6288 BB->addSuccessor(sinkMBB); 6289 6290 // sinkMBB: 6291 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6292 // ... 6293 BB = sinkMBB; 6294 BuildMI(*BB, BB->begin(), dl, 6295 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 6296 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6297 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6298 6299 MI->eraseFromParent(); // The pseudo instruction is gone now. 6300 return BB; 6301 } 6302 6303 case ARM::BCCi64: 6304 case ARM::BCCZi64: { 6305 // If there is an unconditional branch to the other successor, remove it. 6306 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 6307 6308 // Compare both parts that make up the double comparison separately for 6309 // equality. 6310 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 6311 6312 unsigned LHS1 = MI->getOperand(1).getReg(); 6313 unsigned LHS2 = MI->getOperand(2).getReg(); 6314 if (RHSisZero) { 6315 AddDefaultPred(BuildMI(BB, dl, 6316 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6317 .addReg(LHS1).addImm(0)); 6318 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6319 .addReg(LHS2).addImm(0) 6320 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6321 } else { 6322 unsigned RHS1 = MI->getOperand(3).getReg(); 6323 unsigned RHS2 = MI->getOperand(4).getReg(); 6324 AddDefaultPred(BuildMI(BB, dl, 6325 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6326 .addReg(LHS1).addReg(RHS1)); 6327 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6328 .addReg(LHS2).addReg(RHS2) 6329 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6330 } 6331 6332 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 6333 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 6334 if (MI->getOperand(0).getImm() == ARMCC::NE) 6335 std::swap(destMBB, exitMBB); 6336 6337 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6338 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 6339 if (isThumb2) 6340 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 6341 else 6342 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 6343 6344 MI->eraseFromParent(); // The pseudo instruction is gone now. 6345 return BB; 6346 } 6347 6348 case ARM::Int_eh_sjlj_setjmp: 6349 case ARM::Int_eh_sjlj_setjmp_nofp: 6350 case ARM::tInt_eh_sjlj_setjmp: 6351 case ARM::t2Int_eh_sjlj_setjmp: 6352 case ARM::t2Int_eh_sjlj_setjmp_nofp: 6353 EmitSjLjDispatchBlock(MI, BB); 6354 return BB; 6355 6356 case ARM::ABS: 6357 case ARM::t2ABS: { 6358 // To insert an ABS instruction, we have to insert the 6359 // diamond control-flow pattern. 
The incoming instruction knows the 6360 // source vreg to test against 0, the destination vreg to set, 6361 // the condition code register to branch on, the 6362 // true/false values to select between, and a branch opcode to use. 6363 // It transforms 6364 // V1 = ABS V0 6365 // into 6366 // V2 = MOVS V0 6367 // BCC (branch to SinkBB if V0 >= 0) 6368 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 6369 // SinkBB: V1 = PHI(V2, V3) 6370 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6371 MachineFunction::iterator BBI = BB; 6372 ++BBI; 6373 MachineFunction *Fn = BB->getParent(); 6374 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6375 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6376 Fn->insert(BBI, RSBBB); 6377 Fn->insert(BBI, SinkBB); 6378 6379 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 6380 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 6381 bool isThumb2 = Subtarget->isThumb2(); 6382 MachineRegisterInfo &MRI = Fn->getRegInfo(); 6383 // In Thumb mode S must not be specified if source register is the SP or 6384 // PC and if destination register is the SP, so restrict register class 6385 unsigned NewMovDstReg = MRI.createVirtualRegister( 6386 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6387 unsigned NewRsbDstReg = MRI.createVirtualRegister( 6388 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6389 6390 // Transfer the remainder of BB and its successor edges to sinkMBB. 6391 SinkBB->splice(SinkBB->begin(), BB, 6392 llvm::next(MachineBasicBlock::iterator(MI)), 6393 BB->end()); 6394 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 6395 6396 BB->addSuccessor(RSBBB); 6397 BB->addSuccessor(SinkBB); 6398 6399 // fall through to SinkMBB 6400 RSBBB->addSuccessor(SinkBB); 6401 6402 // insert a movs at the end of BB 6403 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr), 6404 NewMovDstReg) 6405 .addReg(ABSSrcReg, RegState::Kill) 6406 .addImm((unsigned)ARMCC::AL).addReg(0) 6407 .addReg(ARM::CPSR, RegState::Define); 6408 6409 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 6410 BuildMI(BB, dl, 6411 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 6412 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 6413 6414 // insert rsbri in RSBBB 6415 // Note: BCC and rsbri will be converted into predicated rsbmi 6416 // by if-conversion pass 6417 BuildMI(*RSBBB, RSBBB->begin(), dl, 6418 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 6419 .addReg(NewMovDstReg, RegState::Kill) 6420 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 6421 6422 // insert PHI in SinkBB, 6423 // reuse ABSDstReg to not change uses of ABS instruction 6424 BuildMI(*SinkBB, SinkBB->begin(), dl, 6425 TII->get(ARM::PHI), ABSDstReg) 6426 .addReg(NewRsbDstReg).addMBB(RSBBB) 6427 .addReg(NewMovDstReg).addMBB(BB); 6428 6429 // remove ABS instruction 6430 MI->eraseFromParent(); 6431 6432 // return last added BB 6433 return SinkBB; 6434 } 6435 } 6436} 6437 6438void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 6439 SDNode *Node) const { 6440 const MCInstrDesc *MCID = &MI->getDesc(); 6441 if (!MCID->hasPostISelHook()) { 6442 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 6443 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 6444 return; 6445 } 6446 6447 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 6448 // RSC. 
Coming out of isel, they have an implicit CPSR def, but the optional 6449 // operand is still set to noreg. If needed, set the optional operand's 6450 // register to CPSR, and remove the redundant implicit def. 6451 // 6452 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 6453 6454 // Rename pseudo opcodes. 6455 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 6456 if (NewOpc) { 6457 const ARMBaseInstrInfo *TII = 6458 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 6459 MCID = &TII->get(NewOpc); 6460 6461 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 6462 "converted opcode should be the same except for cc_out"); 6463 6464 MI->setDesc(*MCID); 6465 6466 // Add the optional cc_out operand 6467 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 6468 } 6469 unsigned ccOutIdx = MCID->getNumOperands() - 1; 6470 6471 // Any ARM instruction that sets the 's' bit should specify an optional 6472 // "cc_out" operand in the last operand position. 6473 if (!MCID->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 6474 assert(!NewOpc && "Optional cc_out operand required"); 6475 return; 6476 } 6477 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 6478 // since we already have an optional CPSR def. 6479 bool definesCPSR = false; 6480 bool deadCPSR = false; 6481 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 6482 i != e; ++i) { 6483 const MachineOperand &MO = MI->getOperand(i); 6484 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 6485 definesCPSR = true; 6486 if (MO.isDead()) 6487 deadCPSR = true; 6488 MI->RemoveOperand(i); 6489 break; 6490 } 6491 } 6492 if (!definesCPSR) { 6493 assert(!NewOpc && "Optional cc_out operand required"); 6494 return; 6495 } 6496 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 6497 if (deadCPSR) { 6498 assert(!MI->getOperand(ccOutIdx).getReg() && 6499 "expect uninitialized optional cc_out operand"); 6500 return; 6501 } 6502 6503 // If this instruction was defined with an optional CPSR def and its dag node 6504 // had a live implicit CPSR def, then activate the optional CPSR def. 6505 MachineOperand &MO = MI->getOperand(ccOutIdx); 6506 MO.setReg(ARM::CPSR); 6507 MO.setIsDef(true); 6508} 6509 6510//===----------------------------------------------------------------------===// 6511// ARM Optimization Hooks 6512//===----------------------------------------------------------------------===// 6513 6514static 6515SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 6516 TargetLowering::DAGCombinerInfo &DCI) { 6517 SelectionDAG &DAG = DCI.DAG; 6518 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6519 EVT VT = N->getValueType(0); 6520 unsigned Opc = N->getOpcode(); 6521 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 6522 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 6523 SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2); 6524 ISD::CondCode CC = ISD::SETCC_INVALID; 6525 6526 if (isSlctCC) { 6527 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 6528 } else { 6529 SDValue CCOp = Slct.getOperand(0); 6530 if (CCOp.getOpcode() == ISD::SETCC) 6531 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 6532 } 6533 6534 bool DoXform = false; 6535 bool InvCC = false; 6536 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 6537 "Bad input!"); 6538 6539 if (LHS.getOpcode() == ISD::Constant && 6540 cast<ConstantSDNode>(LHS)->isNullValue()) { 6541 DoXform = true; 6542 } else if (CC != ISD::SETCC_INVALID && 6543 RHS.getOpcode() == ISD::Constant && 6544 cast<ConstantSDNode>(RHS)->isNullValue()) { 6545 std::swap(LHS, RHS); 6546 SDValue Op0 = Slct.getOperand(0); 6547 EVT OpVT = isSlctCC ? Op0.getValueType() : 6548 Op0.getOperand(0).getValueType(); 6549 bool isInt = OpVT.isInteger(); 6550 CC = ISD::getSetCCInverse(CC, isInt); 6551 6552 if (!TLI.isCondCodeLegal(CC, OpVT)) 6553 return SDValue(); // Inverse operator isn't legal. 6554 6555 DoXform = true; 6556 InvCC = true; 6557 } 6558 6559 if (DoXform) { 6560 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 6561 if (isSlctCC) 6562 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 6563 Slct.getOperand(0), Slct.getOperand(1), CC); 6564 SDValue CCOp = Slct.getOperand(0); 6565 if (InvCC) 6566 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 6567 CCOp.getOperand(0), CCOp.getOperand(1), CC); 6568 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 6569 CCOp, OtherOp, Result); 6570 } 6571 return SDValue(); 6572} 6573 6574// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 6575// (only after legalization). 6576static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 6577 TargetLowering::DAGCombinerInfo &DCI, 6578 const ARMSubtarget *Subtarget) { 6579 6580 // Only perform optimization if after legalize, and if NEON is available. We 6581 // also expected both operands to be BUILD_VECTORs. 6582 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 6583 || N0.getOpcode() != ISD::BUILD_VECTOR 6584 || N1.getOpcode() != ISD::BUILD_VECTOR) 6585 return SDValue(); 6586 6587 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 6588 EVT VT = N->getValueType(0); 6589 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 6590 return SDValue(); 6591 6592 // Check that the vector operands are of the right form. 6593 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 6594 // operands, where N is the size of the formed vector. 6595 // Each EXTRACT_VECTOR should have the same input vector and odd or even 6596 // index such that we have a pair wise add pattern. 6597 6598 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 6599 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6600 return SDValue(); 6601 SDValue Vec = N0->getOperand(0)->getOperand(0); 6602 SDNode *V = Vec.getNode(); 6603 unsigned nextIndex = 0; 6604 6605 // For each operands to the ADD which are BUILD_VECTORs, 6606 // check to see if each of their operands are an EXTRACT_VECTOR with 6607 // the same vector and appropriate index. 
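// [Editor's note: illustrative example; the types and the %vec name are made
// up.] With a v4i16 source vector %vec, the pattern
//   N0 = BUILD_VECTOR (extract_elt %vec, 0), (extract_elt %vec, 2)
//   N1 = BUILD_VECTOR (extract_elt %vec, 1), (extract_elt %vec, 3)
//   add N0, N1
// is recognized below and replaced by a truncate of
// llvm.arm.neon.vpaddls.v2i32(%vec), i.e. a single pairwise add.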
6608 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 6609 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 6610 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6611 6612 SDValue ExtVec0 = N0->getOperand(i); 6613 SDValue ExtVec1 = N1->getOperand(i); 6614 6615 // First operand is the vector, verify its the same. 6616 if (V != ExtVec0->getOperand(0).getNode() || 6617 V != ExtVec1->getOperand(0).getNode()) 6618 return SDValue(); 6619 6620 // Second is the constant, verify its correct. 6621 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 6622 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 6623 6624 // For the constant, we want to see all the even or all the odd. 6625 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 6626 || C1->getZExtValue() != nextIndex+1) 6627 return SDValue(); 6628 6629 // Increment index. 6630 nextIndex+=2; 6631 } else 6632 return SDValue(); 6633 } 6634 6635 // Create VPADDL node. 6636 SelectionDAG &DAG = DCI.DAG; 6637 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6638 6639 // Build operand list. 6640 SmallVector<SDValue, 8> Ops; 6641 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 6642 TLI.getPointerTy())); 6643 6644 // Input is the vector. 6645 Ops.push_back(Vec); 6646 6647 // Get widened type and narrowed type. 6648 MVT widenType; 6649 unsigned numElem = VT.getVectorNumElements(); 6650 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 6651 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 6652 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 6653 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 6654 default: 6655 assert(0 && "Invalid vector element type for padd optimization."); 6656 } 6657 6658 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 6659 widenType, &Ops[0], Ops.size()); 6660 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 6661} 6662 6663/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 6664/// operands N0 and N1. This is a helper for PerformADDCombine that is 6665/// called with the default operands, and if that fails, with commuted 6666/// operands. 6667static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 6668 TargetLowering::DAGCombinerInfo &DCI, 6669 const ARMSubtarget *Subtarget){ 6670 6671 // Attempt to create vpaddl for this add. 6672 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 6673 if (Result.getNode()) 6674 return Result; 6675 6676 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 6677 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 6678 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 6679 if (Result.getNode()) return Result; 6680 } 6681 return SDValue(); 6682} 6683 6684/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 6685/// 6686static SDValue PerformADDCombine(SDNode *N, 6687 TargetLowering::DAGCombinerInfo &DCI, 6688 const ARMSubtarget *Subtarget) { 6689 SDValue N0 = N->getOperand(0); 6690 SDValue N1 = N->getOperand(1); 6691 6692 // First try with the default operand order. 6693 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 6694 if (Result.getNode()) 6695 return Result; 6696 6697 // If that didn't work, try again with the operands commuted. 
6698 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 6699} 6700 6701/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 6702/// 6703static SDValue PerformSUBCombine(SDNode *N, 6704 TargetLowering::DAGCombinerInfo &DCI) { 6705 SDValue N0 = N->getOperand(0); 6706 SDValue N1 = N->getOperand(1); 6707 6708 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 6709 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 6710 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 6711 if (Result.getNode()) return Result; 6712 } 6713 6714 return SDValue(); 6715} 6716 6717/// PerformVMULCombine 6718/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 6719/// special multiplier accumulator forwarding. 6720/// vmul d3, d0, d2 6721/// vmla d3, d1, d2 6722/// is faster than 6723/// vadd d3, d0, d1 6724/// vmul d3, d3, d2 6725static SDValue PerformVMULCombine(SDNode *N, 6726 TargetLowering::DAGCombinerInfo &DCI, 6727 const ARMSubtarget *Subtarget) { 6728 if (!Subtarget->hasVMLxForwarding()) 6729 return SDValue(); 6730 6731 SelectionDAG &DAG = DCI.DAG; 6732 SDValue N0 = N->getOperand(0); 6733 SDValue N1 = N->getOperand(1); 6734 unsigned Opcode = N0.getOpcode(); 6735 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6736 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 6737 Opcode = N1.getOpcode(); 6738 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6739 Opcode != ISD::FADD && Opcode != ISD::FSUB) 6740 return SDValue(); 6741 std::swap(N0, N1); 6742 } 6743 6744 EVT VT = N->getValueType(0); 6745 DebugLoc DL = N->getDebugLoc(); 6746 SDValue N00 = N0->getOperand(0); 6747 SDValue N01 = N0->getOperand(1); 6748 return DAG.getNode(Opcode, DL, VT, 6749 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 6750 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 6751} 6752 6753static SDValue PerformMULCombine(SDNode *N, 6754 TargetLowering::DAGCombinerInfo &DCI, 6755 const ARMSubtarget *Subtarget) { 6756 SelectionDAG &DAG = DCI.DAG; 6757 6758 if (Subtarget->isThumb1Only()) 6759 return SDValue(); 6760 6761 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6762 return SDValue(); 6763 6764 EVT VT = N->getValueType(0); 6765 if (VT.is64BitVector() || VT.is128BitVector()) 6766 return PerformVMULCombine(N, DCI, Subtarget); 6767 if (VT != MVT::i32) 6768 return SDValue(); 6769 6770 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6771 if (!C) 6772 return SDValue(); 6773 6774 uint64_t MulAmt = C->getZExtValue(); 6775 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 6776 ShiftAmt = ShiftAmt & (32 - 1); 6777 SDValue V = N->getOperand(0); 6778 DebugLoc DL = N->getDebugLoc(); 6779 6780 SDValue Res; 6781 MulAmt >>= ShiftAmt; 6782 if (isPowerOf2_32(MulAmt - 1)) { 6783 // (mul x, 2^N + 1) => (add (shl x, N), x) 6784 Res = DAG.getNode(ISD::ADD, DL, VT, 6785 V, DAG.getNode(ISD::SHL, DL, VT, 6786 V, DAG.getConstant(Log2_32(MulAmt-1), 6787 MVT::i32))); 6788 } else if (isPowerOf2_32(MulAmt + 1)) { 6789 // (mul x, 2^N - 1) => (sub (shl x, N), x) 6790 Res = DAG.getNode(ISD::SUB, DL, VT, 6791 DAG.getNode(ISD::SHL, DL, VT, 6792 V, DAG.getConstant(Log2_32(MulAmt+1), 6793 MVT::i32)), 6794 V); 6795 } else 6796 return SDValue(); 6797 6798 if (ShiftAmt != 0) 6799 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 6800 DAG.getConstant(ShiftAmt, MVT::i32)); 6801 6802 // Do not add new nodes to DAG combiner worklist. 
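// [Editor's note: worked examples, not in the original revision.] With the
// decomposition above, mul x, 9 becomes add (shl x, 3), x and mul x, 30
// becomes shl (sub (shl x, 4), x), 1; the AddTo=false argument below is what
// keeps the new nodes off the worklist.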
6803 DCI.CombineTo(N, Res, false); 6804 return SDValue(); 6805} 6806 6807static SDValue PerformANDCombine(SDNode *N, 6808 TargetLowering::DAGCombinerInfo &DCI) { 6809 6810 // Attempt to use immediate-form VBIC 6811 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6812 DebugLoc dl = N->getDebugLoc(); 6813 EVT VT = N->getValueType(0); 6814 SelectionDAG &DAG = DCI.DAG; 6815 6816 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6817 return SDValue(); 6818 6819 APInt SplatBits, SplatUndef; 6820 unsigned SplatBitSize; 6821 bool HasAnyUndefs; 6822 if (BVN && 6823 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6824 if (SplatBitSize <= 64) { 6825 EVT VbicVT; 6826 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 6827 SplatUndef.getZExtValue(), SplatBitSize, 6828 DAG, VbicVT, VT.is128BitVector(), 6829 OtherModImm); 6830 if (Val.getNode()) { 6831 SDValue Input = 6832 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 6833 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 6834 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 6835 } 6836 } 6837 } 6838 6839 return SDValue(); 6840} 6841 6842/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 6843static SDValue PerformORCombine(SDNode *N, 6844 TargetLowering::DAGCombinerInfo &DCI, 6845 const ARMSubtarget *Subtarget) { 6846 // Attempt to use immediate-form VORR 6847 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6848 DebugLoc dl = N->getDebugLoc(); 6849 EVT VT = N->getValueType(0); 6850 SelectionDAG &DAG = DCI.DAG; 6851 6852 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6853 return SDValue(); 6854 6855 APInt SplatBits, SplatUndef; 6856 unsigned SplatBitSize; 6857 bool HasAnyUndefs; 6858 if (BVN && Subtarget->hasNEON() && 6859 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6860 if (SplatBitSize <= 64) { 6861 EVT VorrVT; 6862 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6863 SplatUndef.getZExtValue(), SplatBitSize, 6864 DAG, VorrVT, VT.is128BitVector(), 6865 OtherModImm); 6866 if (Val.getNode()) { 6867 SDValue Input = 6868 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 6869 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 6870 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 6871 } 6872 } 6873 } 6874 6875 SDValue N0 = N->getOperand(0); 6876 if (N0.getOpcode() != ISD::AND) 6877 return SDValue(); 6878 SDValue N1 = N->getOperand(1); 6879 6880 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 6881 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 6882 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 6883 APInt SplatUndef; 6884 unsigned SplatBitSize; 6885 bool HasAnyUndefs; 6886 6887 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 6888 APInt SplatBits0; 6889 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 6890 HasAnyUndefs) && !HasAnyUndefs) { 6891 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 6892 APInt SplatBits1; 6893 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 6894 HasAnyUndefs) && !HasAnyUndefs && 6895 SplatBits0 == ~SplatBits1) { 6896 // Canonicalize the vector type to make instruction selection simpler. 6897 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 6898 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 6899 N0->getOperand(1), N0->getOperand(0), 6900 N1->getOperand(0)); 6901 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 6902 } 6903 } 6904 } 6905 6906 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 6907 // reasonable. 6908 6909 // BFI is only available on V6T2+ 6910 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 6911 return SDValue(); 6912 6913 DebugLoc DL = N->getDebugLoc(); 6914 // 1) or (and A, mask), val => ARMbfi A, val, mask 6915 // iff (val & mask) == val 6916 // 6917 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6918 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 6919 // && mask == ~mask2 6920 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 6921 // && ~mask == mask2 6922 // (i.e., copy a bitfield value into another bitfield of the same width) 6923 6924 if (VT != MVT::i32) 6925 return SDValue(); 6926 6927 SDValue N00 = N0.getOperand(0); 6928 6929 // The value and the mask need to be constants so we can verify this is 6930 // actually a bitfield set. If the mask is 0xffff, we can do better 6931 // via a movt instruction, so don't use BFI in that case. 6932 SDValue MaskOp = N0.getOperand(1); 6933 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 6934 if (!MaskC) 6935 return SDValue(); 6936 unsigned Mask = MaskC->getZExtValue(); 6937 if (Mask == 0xffff) 6938 return SDValue(); 6939 SDValue Res; 6940 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 6941 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 6942 if (N1C) { 6943 unsigned Val = N1C->getZExtValue(); 6944 if ((Val & ~Mask) != Val) 6945 return SDValue(); 6946 6947 if (ARM::isBitFieldInvertedMask(Mask)) { 6948 Val >>= CountTrailingZeros_32(~Mask); 6949 6950 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 6951 DAG.getConstant(Val, MVT::i32), 6952 DAG.getConstant(Mask, MVT::i32)); 6953 6954 // Do not add new nodes to DAG combiner worklist. 6955 DCI.CombineTo(N, Res, false); 6956 return SDValue(); 6957 } 6958 } else if (N1.getOpcode() == ISD::AND) { 6959 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6960 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6961 if (!N11C) 6962 return SDValue(); 6963 unsigned Mask2 = N11C->getZExtValue(); 6964 6965 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 6966 // as is to match. 6967 if (ARM::isBitFieldInvertedMask(Mask) && 6968 (Mask == ~Mask2)) { 6969 // The pack halfword instruction works better for masks that fit it, 6970 // so use that when it's available. 6971 if (Subtarget->hasT2ExtractPack() && 6972 (Mask == 0xffff || Mask == 0xffff0000)) 6973 return SDValue(); 6974 // 2a 6975 unsigned amt = CountTrailingZeros_32(Mask2); 6976 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 6977 DAG.getConstant(amt, MVT::i32)); 6978 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 6979 DAG.getConstant(Mask, MVT::i32)); 6980 // Do not add new nodes to DAG combiner worklist. 6981 DCI.CombineTo(N, Res, false); 6982 return SDValue(); 6983 } else if (ARM::isBitFieldInvertedMask(~Mask) && 6984 (~Mask == Mask2)) { 6985 // The pack halfword instruction works better for masks that fit it, 6986 // so use that when it's available. 
6987 if (Subtarget->hasT2ExtractPack() && 6988 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 6989 return SDValue(); 6990 // 2b 6991 unsigned lsb = CountTrailingZeros_32(Mask); 6992 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 6993 DAG.getConstant(lsb, MVT::i32)); 6994 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 6995 DAG.getConstant(Mask2, MVT::i32)); 6996 // Do not add new nodes to DAG combiner worklist. 6997 DCI.CombineTo(N, Res, false); 6998 return SDValue(); 6999 } 7000 } 7001 7002 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 7003 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 7004 ARM::isBitFieldInvertedMask(~Mask)) { 7005 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 7006 // where lsb(mask) == #shamt and masked bits of B are known zero. 7007 SDValue ShAmt = N00.getOperand(1); 7008 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7009 unsigned LSB = CountTrailingZeros_32(Mask); 7010 if (ShAmtC != LSB) 7011 return SDValue(); 7012 7013 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 7014 DAG.getConstant(~Mask, MVT::i32)); 7015 7016 // Do not add new nodes to DAG combiner worklist. 7017 DCI.CombineTo(N, Res, false); 7018 } 7019 7020 return SDValue(); 7021} 7022 7023/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 7024/// the bits being cleared by the AND are not demanded by the BFI. 7025static SDValue PerformBFICombine(SDNode *N, 7026 TargetLowering::DAGCombinerInfo &DCI) { 7027 SDValue N1 = N->getOperand(1); 7028 if (N1.getOpcode() == ISD::AND) { 7029 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7030 if (!N11C) 7031 return SDValue(); 7032 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 7033 unsigned LSB = CountTrailingZeros_32(~InvMask); 7034 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 7035 unsigned Mask = (1 << Width)-1; 7036 unsigned Mask2 = N11C->getZExtValue(); 7037 if ((Mask & (~Mask2)) == 0) 7038 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 7039 N->getOperand(0), N1.getOperand(0), 7040 N->getOperand(2)); 7041 } 7042 return SDValue(); 7043} 7044 7045/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 7046/// ARMISD::VMOVRRD. 7047static SDValue PerformVMOVRRDCombine(SDNode *N, 7048 TargetLowering::DAGCombinerInfo &DCI) { 7049 // vmovrrd(vmovdrr x, y) -> x,y 7050 SDValue InDouble = N->getOperand(0); 7051 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 7052 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 7053 7054 // vmovrrd(load f64) -> (load i32), (load i32) 7055 SDNode *InNode = InDouble.getNode(); 7056 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 7057 InNode->getValueType(0) == MVT::f64 && 7058 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 7059 !cast<LoadSDNode>(InNode)->isVolatile()) { 7060 // TODO: Should this be done for non-FrameIndex operands? 
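// [Editor's note: illustrative sketch; the frame index is whatever the load
// already uses.] A node such as
//   (i32, i32) = ARMISD::VMOVRRD (f64 load <fi#N>)
// is rewritten below as two i32 loads from the same slot at offsets 0 and 4,
// so the value never makes a round trip through a VFP register.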
7061 LoadSDNode *LD = cast<LoadSDNode>(InNode); 7062 7063 SelectionDAG &DAG = DCI.DAG; 7064 DebugLoc DL = LD->getDebugLoc(); 7065 SDValue BasePtr = LD->getBasePtr(); 7066 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 7067 LD->getPointerInfo(), LD->isVolatile(), 7068 LD->isNonTemporal(), LD->getAlignment()); 7069 7070 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7071 DAG.getConstant(4, MVT::i32)); 7072 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 7073 LD->getPointerInfo(), LD->isVolatile(), 7074 LD->isNonTemporal(), 7075 std::min(4U, LD->getAlignment() / 2)); 7076 7077 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 7078 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 7079 DCI.RemoveFromWorklist(LD); 7080 DAG.DeleteNode(LD); 7081 return Result; 7082 } 7083 7084 return SDValue(); 7085} 7086 7087/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 7088/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 7089static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 7090 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 7091 SDValue Op0 = N->getOperand(0); 7092 SDValue Op1 = N->getOperand(1); 7093 if (Op0.getOpcode() == ISD::BITCAST) 7094 Op0 = Op0.getOperand(0); 7095 if (Op1.getOpcode() == ISD::BITCAST) 7096 Op1 = Op1.getOperand(0); 7097 if (Op0.getOpcode() == ARMISD::VMOVRRD && 7098 Op0.getNode() == Op1.getNode() && 7099 Op0.getResNo() == 0 && Op1.getResNo() == 1) 7100 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 7101 N->getValueType(0), Op0.getOperand(0)); 7102 return SDValue(); 7103} 7104 7105/// PerformSTORECombine - Target-specific dag combine xforms for 7106/// ISD::STORE. 7107static SDValue PerformSTORECombine(SDNode *N, 7108 TargetLowering::DAGCombinerInfo &DCI) { 7109 // Bitcast an i64 store extracted from a vector to f64. 7110 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
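// [Editor's note: illustrative example; %vec and %ptr are made-up names.]
// For
//   store (i64 extract_vector_elt (v2i64 %vec), 1), %ptr
// the code further down bitcasts %vec to v2f64, extracts the element as f64
// and bitcasts it back to i64 for the store; once the DAGCombiner folds the
// bitcasts away, the element is stored from a D register instead of a GPR
// pair.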
7111 StoreSDNode *St = cast<StoreSDNode>(N); 7112 SDValue StVal = St->getValue(); 7113 if (!ISD::isNormalStore(St) || St->isVolatile()) 7114 return SDValue(); 7115 7116 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 7117 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 7118 SelectionDAG &DAG = DCI.DAG; 7119 DebugLoc DL = St->getDebugLoc(); 7120 SDValue BasePtr = St->getBasePtr(); 7121 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 7122 StVal.getNode()->getOperand(0), BasePtr, 7123 St->getPointerInfo(), St->isVolatile(), 7124 St->isNonTemporal(), St->getAlignment()); 7125 7126 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7127 DAG.getConstant(4, MVT::i32)); 7128 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 7129 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 7130 St->isNonTemporal(), 7131 std::min(4U, St->getAlignment() / 2)); 7132 } 7133 7134 if (StVal.getValueType() != MVT::i64 || 7135 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7136 return SDValue(); 7137 7138 SelectionDAG &DAG = DCI.DAG; 7139 DebugLoc dl = StVal.getDebugLoc(); 7140 SDValue IntVec = StVal.getOperand(0); 7141 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7142 IntVec.getValueType().getVectorNumElements()); 7143 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 7144 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7145 Vec, StVal.getOperand(1)); 7146 dl = N->getDebugLoc(); 7147 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 7148 // Make the DAGCombiner fold the bitcasts. 7149 DCI.AddToWorklist(Vec.getNode()); 7150 DCI.AddToWorklist(ExtElt.getNode()); 7151 DCI.AddToWorklist(V.getNode()); 7152 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 7153 St->getPointerInfo(), St->isVolatile(), 7154 St->isNonTemporal(), St->getAlignment(), 7155 St->getTBAAInfo()); 7156} 7157 7158/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 7159/// are normal, non-volatile loads. If so, it is profitable to bitcast an 7160/// i64 vector to have f64 elements, since the value can then be loaded 7161/// directly into a VFP register. 7162static bool hasNormalLoadOperand(SDNode *N) { 7163 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 7164 for (unsigned i = 0; i < NumElts; ++i) { 7165 SDNode *Elt = N->getOperand(i).getNode(); 7166 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 7167 return true; 7168 } 7169 return false; 7170} 7171 7172/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 7173/// ISD::BUILD_VECTOR. 7174static SDValue PerformBUILD_VECTORCombine(SDNode *N, 7175 TargetLowering::DAGCombinerInfo &DCI){ 7176 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 7177 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 7178 // into a pair of GPRs, which is fine when the value is used as a scalar, 7179 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 7180 SelectionDAG &DAG = DCI.DAG; 7181 if (N->getNumOperands() == 2) { 7182 SDValue RV = PerformVMOVDRRCombine(N, DAG); 7183 if (RV.getNode()) 7184 return RV; 7185 } 7186 7187 // Load i64 elements as f64 values so that type legalization does not split 7188 // them up into i32 values. 
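// [Editor's note: illustrative example; %p and %q are made-up names.]
// A v2i64 build_vector (i64 load %p), (i64 load %q) is rebuilt below as a
// v2f64 build_vector of bitcast operands followed by a bitcast back to
// v2i64; the DAGCombiner then folds each bitcast-of-load into an f64 load
// that goes straight into a VFP register.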
7189 EVT VT = N->getValueType(0); 7190 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 7191 return SDValue(); 7192 DebugLoc dl = N->getDebugLoc(); 7193 SmallVector<SDValue, 8> Ops; 7194 unsigned NumElts = VT.getVectorNumElements(); 7195 for (unsigned i = 0; i < NumElts; ++i) { 7196 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 7197 Ops.push_back(V); 7198 // Make the DAGCombiner fold the bitcast. 7199 DCI.AddToWorklist(V.getNode()); 7200 } 7201 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 7202 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 7203 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 7204} 7205 7206/// PerformInsertEltCombine - Target-specific dag combine xforms for 7207/// ISD::INSERT_VECTOR_ELT. 7208static SDValue PerformInsertEltCombine(SDNode *N, 7209 TargetLowering::DAGCombinerInfo &DCI) { 7210 // Bitcast an i64 load inserted into a vector to f64. 7211 // Otherwise, the i64 value will be legalized to a pair of i32 values. 7212 EVT VT = N->getValueType(0); 7213 SDNode *Elt = N->getOperand(1).getNode(); 7214 if (VT.getVectorElementType() != MVT::i64 || 7215 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 7216 return SDValue(); 7217 7218 SelectionDAG &DAG = DCI.DAG; 7219 DebugLoc dl = N->getDebugLoc(); 7220 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7221 VT.getVectorNumElements()); 7222 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 7223 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 7224 // Make the DAGCombiner fold the bitcasts. 7225 DCI.AddToWorklist(Vec.getNode()); 7226 DCI.AddToWorklist(V.getNode()); 7227 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 7228 Vec, V, N->getOperand(2)); 7229 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 7230} 7231 7232/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 7233/// ISD::VECTOR_SHUFFLE. 7234static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 7235 // The LLVM shufflevector instruction does not require the shuffle mask 7236 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 7237 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 7238 // operands do not match the mask length, they are extended by concatenating 7239 // them with undef vectors. That is probably the right thing for other 7240 // targets, but for NEON it is better to concatenate two double-register 7241 // size vector operands into a single quad-register size vector. Do that 7242 // transformation here: 7243 // shuffle(concat(v1, undef), concat(v2, undef)) -> 7244 // shuffle(concat(v1, v2), undef) 7245 SDValue Op0 = N->getOperand(0); 7246 SDValue Op1 = N->getOperand(1); 7247 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 7248 Op1.getOpcode() != ISD::CONCAT_VECTORS || 7249 Op0.getNumOperands() != 2 || 7250 Op1.getNumOperands() != 2) 7251 return SDValue(); 7252 SDValue Concat0Op1 = Op0.getOperand(1); 7253 SDValue Concat1Op1 = Op1.getOperand(1); 7254 if (Concat0Op1.getOpcode() != ISD::UNDEF || 7255 Concat1Op1.getOpcode() != ISD::UNDEF) 7256 return SDValue(); 7257 // Skip the transformation if any of the types are illegal. 
7258 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7259 EVT VT = N->getValueType(0); 7260 if (!TLI.isTypeLegal(VT) || 7261 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 7262 !TLI.isTypeLegal(Concat1Op1.getValueType())) 7263 return SDValue(); 7264 7265 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 7266 Op0.getOperand(0), Op1.getOperand(0)); 7267 // Translate the shuffle mask. 7268 SmallVector<int, 16> NewMask; 7269 unsigned NumElts = VT.getVectorNumElements(); 7270 unsigned HalfElts = NumElts/2; 7271 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 7272 for (unsigned n = 0; n < NumElts; ++n) { 7273 int MaskElt = SVN->getMaskElt(n); 7274 int NewElt = -1; 7275 if (MaskElt < (int)HalfElts) 7276 NewElt = MaskElt; 7277 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 7278 NewElt = HalfElts + MaskElt - NumElts; 7279 NewMask.push_back(NewElt); 7280 } 7281 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 7282 DAG.getUNDEF(VT), NewMask.data()); 7283} 7284 7285/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 7286/// NEON load/store intrinsics to merge base address updates. 7287static SDValue CombineBaseUpdate(SDNode *N, 7288 TargetLowering::DAGCombinerInfo &DCI) { 7289 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 7290 return SDValue(); 7291 7292 SelectionDAG &DAG = DCI.DAG; 7293 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 7294 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 7295 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 7296 SDValue Addr = N->getOperand(AddrOpIdx); 7297 7298 // Search for a use of the address operand that is an increment. 7299 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 7300 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 7301 SDNode *User = *UI; 7302 if (User->getOpcode() != ISD::ADD || 7303 UI.getUse().getResNo() != Addr.getResNo()) 7304 continue; 7305 7306 // Check that the add is independent of the load/store. Otherwise, folding 7307 // it would create a cycle. 7308 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 7309 continue; 7310 7311 // Find the new opcode for the updating load/store. 
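// [Editor's note: illustrative example; register names are made up.] If a
// vld1 of a single d-register is followed by "add r0, r0, #8", the increment
// matches the 8 bytes accessed, so the pair is folded below into the
// post-incrementing form "vld1 {d0}, [r0]!" (VLD1_UPD) and the extra i32
// result of the new node replaces the ADD.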
7312 bool isLoad = true; 7313 bool isLaneOp = false; 7314 unsigned NewOpc = 0; 7315 unsigned NumVecs = 0; 7316 if (isIntrinsic) { 7317 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7318 switch (IntNo) { 7319 default: assert(0 && "unexpected intrinsic for Neon base update"); 7320 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 7321 NumVecs = 1; break; 7322 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 7323 NumVecs = 2; break; 7324 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 7325 NumVecs = 3; break; 7326 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 7327 NumVecs = 4; break; 7328 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 7329 NumVecs = 2; isLaneOp = true; break; 7330 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 7331 NumVecs = 3; isLaneOp = true; break; 7332 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 7333 NumVecs = 4; isLaneOp = true; break; 7334 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 7335 NumVecs = 1; isLoad = false; break; 7336 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 7337 NumVecs = 2; isLoad = false; break; 7338 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 7339 NumVecs = 3; isLoad = false; break; 7340 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 7341 NumVecs = 4; isLoad = false; break; 7342 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 7343 NumVecs = 2; isLoad = false; isLaneOp = true; break; 7344 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 7345 NumVecs = 3; isLoad = false; isLaneOp = true; break; 7346 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 7347 NumVecs = 4; isLoad = false; isLaneOp = true; break; 7348 } 7349 } else { 7350 isLaneOp = true; 7351 switch (N->getOpcode()) { 7352 default: assert(0 && "unexpected opcode for Neon base update"); 7353 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 7354 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 7355 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 7356 } 7357 } 7358 7359 // Find the size of memory referenced by the load/store. 7360 EVT VecTy; 7361 if (isLoad) 7362 VecTy = N->getValueType(0); 7363 else 7364 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 7365 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 7366 if (isLaneOp) 7367 NumBytes /= VecTy.getVectorNumElements(); 7368 7369 // If the increment is a constant, it must match the memory ref size. 7370 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 7371 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 7372 uint64_t IncVal = CInc->getZExtValue(); 7373 if (IncVal != NumBytes) 7374 continue; 7375 } else if (NumBytes >= 3 * 16) { 7376 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 7377 // separate instructions that make it harder to use a non-constant update. 7378 continue; 7379 } 7380 7381 // Create the new updating load/store node. 7382 EVT Tys[6]; 7383 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 7384 unsigned n; 7385 for (n = 0; n < NumResultVecs; ++n) 7386 Tys[n] = VecTy; 7387 Tys[n++] = MVT::i32; 7388 Tys[n] = MVT::Other; 7389 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 7390 SmallVector<SDValue, 8> Ops; 7391 Ops.push_back(N->getOperand(0)); // incoming chain 7392 Ops.push_back(N->getOperand(AddrOpIdx)); 7393 Ops.push_back(Inc); 7394 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 7395 Ops.push_back(N->getOperand(i)); 7396 } 7397 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 7398 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 7399 Ops.data(), Ops.size(), 7400 MemInt->getMemoryVT(), 7401 MemInt->getMemOperand()); 7402 7403 // Update the uses. 7404 std::vector<SDValue> NewResults; 7405 for (unsigned i = 0; i < NumResultVecs; ++i) { 7406 NewResults.push_back(SDValue(UpdN.getNode(), i)); 7407 } 7408 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 7409 DCI.CombineTo(N, NewResults); 7410 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 7411 7412 break; 7413 } 7414 return SDValue(); 7415} 7416 7417/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 7418/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 7419/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 7420/// return true. 7421static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 7422 SelectionDAG &DAG = DCI.DAG; 7423 EVT VT = N->getValueType(0); 7424 // vldN-dup instructions only support 64-bit vectors for N > 1. 7425 if (!VT.is64BitVector()) 7426 return false; 7427 7428 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 7429 SDNode *VLD = N->getOperand(0).getNode(); 7430 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 7431 return false; 7432 unsigned NumVecs = 0; 7433 unsigned NewOpc = 0; 7434 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 7435 if (IntNo == Intrinsic::arm_neon_vld2lane) { 7436 NumVecs = 2; 7437 NewOpc = ARMISD::VLD2DUP; 7438 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 7439 NumVecs = 3; 7440 NewOpc = ARMISD::VLD3DUP; 7441 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 7442 NumVecs = 4; 7443 NewOpc = ARMISD::VLD4DUP; 7444 } else { 7445 return false; 7446 } 7447 7448 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 7449 // numbers match the load. 7450 unsigned VLDLaneNo = 7451 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 7452 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7453 UI != UE; ++UI) { 7454 // Ignore uses of the chain result. 7455 if (UI.getUse().getResNo() == NumVecs) 7456 continue; 7457 SDNode *User = *UI; 7458 if (User->getOpcode() != ARMISD::VDUPLANE || 7459 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 7460 return false; 7461 } 7462 7463 // Create the vldN-dup node. 7464 EVT Tys[5]; 7465 unsigned n; 7466 for (n = 0; n < NumVecs; ++n) 7467 Tys[n] = VT; 7468 Tys[n] = MVT::Other; 7469 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 7470 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 7471 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 7472 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 7473 Ops, 2, VLDMemInt->getMemoryVT(), 7474 VLDMemInt->getMemOperand()); 7475 7476 // Update the uses. 
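// Each VDUPLANE user of a vldN-lane result is replaced below with the
// corresponding result of the new vldN-dup node. At the instruction level
// this turns, e.g.,
//   vld2.32 {d16[0], d17[0]}, [r0]    plus a per-lane vdup of each result
// into the all-lanes form
//   vld2.32 {d16[], d17[]}, [r0]
// The chain result is rewired separately afterwards.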
7477 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7478 UI != UE; ++UI) { 7479 unsigned ResNo = UI.getUse().getResNo(); 7480 // Ignore uses of the chain result. 7481 if (ResNo == NumVecs) 7482 continue; 7483 SDNode *User = *UI; 7484 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 7485 } 7486 7487 // Now the vldN-lane intrinsic is dead except for its chain result. 7488 // Update uses of the chain. 7489 std::vector<SDValue> VLDDupResults; 7490 for (unsigned n = 0; n < NumVecs; ++n) 7491 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 7492 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 7493 DCI.CombineTo(VLD, VLDDupResults); 7494 7495 return true; 7496} 7497 7498/// PerformVDUPLANECombine - Target-specific dag combine xforms for 7499/// ARMISD::VDUPLANE. 7500static SDValue PerformVDUPLANECombine(SDNode *N, 7501 TargetLowering::DAGCombinerInfo &DCI) { 7502 SDValue Op = N->getOperand(0); 7503 7504 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 7505 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 7506 if (CombineVLDDUP(N, DCI)) 7507 return SDValue(N, 0); 7508 7509 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 7510 // redundant. Ignore bit_converts for now; element sizes are checked below. 7511 while (Op.getOpcode() == ISD::BITCAST) 7512 Op = Op.getOperand(0); 7513 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 7514 return SDValue(); 7515 7516 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 7517 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 7518 // The canonical VMOV for a zero vector uses a 32-bit element size. 7519 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7520 unsigned EltBits; 7521 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 7522 EltSize = 8; 7523 EVT VT = N->getValueType(0); 7524 if (EltSize > VT.getVectorElementType().getSizeInBits()) 7525 return SDValue(); 7526 7527 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 7528} 7529 7530// isConstVecPow2 - Return true if each vector element is a power of 2, all 7531// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 7532static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 7533{ 7534 integerPart cN; 7535 integerPart c0 = 0; 7536 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 7537 I != E; I++) { 7538 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 7539 if (!C) 7540 return false; 7541 7542 bool isExact; 7543 APFloat APF = C->getValueAPF(); 7544 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 7545 != APFloat::opOK || !isExact) 7546 return false; 7547 7548 c0 = (I == 0) ? cN : c0; 7549 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 7550 return false; 7551 } 7552 C = c0; 7553 return true; 7554} 7555 7556/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 7557/// can replace combinations of VMUL and VCVT (floating-point to integer) 7558/// when the VMUL has a constant operand that is a power of 2. 
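/// The power-of-two constant C = 2^n is absorbed by converting with n
/// fraction bits (8.0 = 2^3 gives the "#3" below), so the multiply vanishes.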
7559/// 7560/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7561/// vmul.f32 d16, d17, d16 7562/// vcvt.s32.f32 d16, d16 7563/// becomes: 7564/// vcvt.s32.f32 d16, d16, #3 7565static SDValue PerformVCVTCombine(SDNode *N, 7566 TargetLowering::DAGCombinerInfo &DCI, 7567 const ARMSubtarget *Subtarget) { 7568 SelectionDAG &DAG = DCI.DAG; 7569 SDValue Op = N->getOperand(0); 7570 7571 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 7572 Op.getOpcode() != ISD::FMUL) 7573 return SDValue(); 7574 7575 uint64_t C; 7576 SDValue N0 = Op->getOperand(0); 7577 SDValue ConstVec = Op->getOperand(1); 7578 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 7579 7580 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7581 !isConstVecPow2(ConstVec, isSigned, C)) 7582 return SDValue(); 7583 7584 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 7585 Intrinsic::arm_neon_vcvtfp2fxu; 7586 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7587 N->getValueType(0), 7588 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 7589 DAG.getConstant(Log2_64(C), MVT::i32)); 7590} 7591 7592/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 7593/// can replace combinations of VCVT (integer to floating-point) and VDIV 7594/// when the VDIV has a constant operand that is a power of 2. 7595/// 7596/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7597/// vcvt.f32.s32 d16, d16 7598/// vdiv.f32 d16, d17, d16 7599/// becomes: 7600/// vcvt.f32.s32 d16, d16, #3 7601static SDValue PerformVDIVCombine(SDNode *N, 7602 TargetLowering::DAGCombinerInfo &DCI, 7603 const ARMSubtarget *Subtarget) { 7604 SelectionDAG &DAG = DCI.DAG; 7605 SDValue Op = N->getOperand(0); 7606 unsigned OpOpcode = Op.getNode()->getOpcode(); 7607 7608 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 7609 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 7610 return SDValue(); 7611 7612 uint64_t C; 7613 SDValue ConstVec = N->getOperand(1); 7614 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 7615 7616 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7617 !isConstVecPow2(ConstVec, isSigned, C)) 7618 return SDValue(); 7619 7620 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 7621 Intrinsic::arm_neon_vcvtfxu2fp; 7622 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7623 Op.getValueType(), 7624 DAG.getConstant(IntrinsicOpcode, MVT::i32), 7625 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 7626} 7627 7628/// Getvshiftimm - Check if this is a valid build_vector for the immediate 7629/// operand of a vector shift operation, where all the elements of the 7630/// build_vector must have the same constant integer value. 7631static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 7632 // Ignore bit_converts. 7633 while (Op.getOpcode() == ISD::BITCAST) 7634 Op = Op.getOperand(0); 7635 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7636 APInt SplatBits, SplatUndef; 7637 unsigned SplatBitSize; 7638 bool HasAnyUndefs; 7639 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 7640 HasAnyUndefs, ElementBits) || 7641 SplatBitSize > ElementBits) 7642 return false; 7643 Cnt = SplatBits.getSExtValue(); 7644 return true; 7645} 7646 7647/// isVShiftLImm - Check if this is a valid build_vector for the immediate 7648/// operand of a vector shift left operation. 
That value must be in the range: 7649/// 0 <= Value < ElementBits for a left shift; or 7650/// 0 <= Value <= ElementBits for a long left shift. 7651static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 7652 assert(VT.isVector() && "vector shift count is not a vector type"); 7653 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7654 if (! getVShiftImm(Op, ElementBits, Cnt)) 7655 return false; 7656 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 7657} 7658 7659/// isVShiftRImm - Check if this is a valid build_vector for the immediate 7660/// operand of a vector shift right operation. For a shift opcode, the value 7661/// is positive, but for an intrinsic the value count must be negative. The 7662/// absolute value must be in the range: 7663/// 1 <= |Value| <= ElementBits for a right shift; or 7664/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 7665static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 7666 int64_t &Cnt) { 7667 assert(VT.isVector() && "vector shift count is not a vector type"); 7668 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7669 if (! getVShiftImm(Op, ElementBits, Cnt)) 7670 return false; 7671 if (isIntrinsic) 7672 Cnt = -Cnt; 7673 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 7674} 7675 7676/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 7677static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 7678 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 7679 switch (IntNo) { 7680 default: 7681 // Don't do anything for most intrinsics. 7682 break; 7683 7684 // Vector shifts: check for immediate versions and lower them. 7685 // Note: This is done during DAG combining instead of DAG legalizing because 7686 // the build_vectors for 64-bit vector element shift counts are generally 7687 // not legal, and it is hard to see their values after they get legalized to 7688 // loads from a constant pool. 7689 case Intrinsic::arm_neon_vshifts: 7690 case Intrinsic::arm_neon_vshiftu: 7691 case Intrinsic::arm_neon_vshiftls: 7692 case Intrinsic::arm_neon_vshiftlu: 7693 case Intrinsic::arm_neon_vshiftn: 7694 case Intrinsic::arm_neon_vrshifts: 7695 case Intrinsic::arm_neon_vrshiftu: 7696 case Intrinsic::arm_neon_vrshiftn: 7697 case Intrinsic::arm_neon_vqshifts: 7698 case Intrinsic::arm_neon_vqshiftu: 7699 case Intrinsic::arm_neon_vqshiftsu: 7700 case Intrinsic::arm_neon_vqshiftns: 7701 case Intrinsic::arm_neon_vqshiftnu: 7702 case Intrinsic::arm_neon_vqshiftnsu: 7703 case Intrinsic::arm_neon_vqrshiftns: 7704 case Intrinsic::arm_neon_vqrshiftnu: 7705 case Intrinsic::arm_neon_vqrshiftnsu: { 7706 EVT VT = N->getOperand(1).getValueType(); 7707 int64_t Cnt; 7708 unsigned VShiftOpc = 0; 7709 7710 switch (IntNo) { 7711 case Intrinsic::arm_neon_vshifts: 7712 case Intrinsic::arm_neon_vshiftu: 7713 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 7714 VShiftOpc = ARMISD::VSHL; 7715 break; 7716 } 7717 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 7718 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
7719 ARMISD::VSHRs : ARMISD::VSHRu); 7720 break; 7721 } 7722 return SDValue(); 7723 7724 case Intrinsic::arm_neon_vshiftls: 7725 case Intrinsic::arm_neon_vshiftlu: 7726 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 7727 break; 7728 llvm_unreachable("invalid shift count for vshll intrinsic"); 7729 7730 case Intrinsic::arm_neon_vrshifts: 7731 case Intrinsic::arm_neon_vrshiftu: 7732 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 7733 break; 7734 return SDValue(); 7735 7736 case Intrinsic::arm_neon_vqshifts: 7737 case Intrinsic::arm_neon_vqshiftu: 7738 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7739 break; 7740 return SDValue(); 7741 7742 case Intrinsic::arm_neon_vqshiftsu: 7743 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7744 break; 7745 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 7746 7747 case Intrinsic::arm_neon_vshiftn: 7748 case Intrinsic::arm_neon_vrshiftn: 7749 case Intrinsic::arm_neon_vqshiftns: 7750 case Intrinsic::arm_neon_vqshiftnu: 7751 case Intrinsic::arm_neon_vqshiftnsu: 7752 case Intrinsic::arm_neon_vqrshiftns: 7753 case Intrinsic::arm_neon_vqrshiftnu: 7754 case Intrinsic::arm_neon_vqrshiftnsu: 7755 // Narrowing shifts require an immediate right shift. 7756 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 7757 break; 7758 llvm_unreachable("invalid shift count for narrowing vector shift " 7759 "intrinsic"); 7760 7761 default: 7762 llvm_unreachable("unhandled vector shift"); 7763 } 7764 7765 switch (IntNo) { 7766 case Intrinsic::arm_neon_vshifts: 7767 case Intrinsic::arm_neon_vshiftu: 7768 // Opcode already set above. 7769 break; 7770 case Intrinsic::arm_neon_vshiftls: 7771 case Intrinsic::arm_neon_vshiftlu: 7772 if (Cnt == VT.getVectorElementType().getSizeInBits()) 7773 VShiftOpc = ARMISD::VSHLLi; 7774 else 7775 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
7776 ARMISD::VSHLLs : ARMISD::VSHLLu); 7777 break; 7778 case Intrinsic::arm_neon_vshiftn: 7779 VShiftOpc = ARMISD::VSHRN; break; 7780 case Intrinsic::arm_neon_vrshifts: 7781 VShiftOpc = ARMISD::VRSHRs; break; 7782 case Intrinsic::arm_neon_vrshiftu: 7783 VShiftOpc = ARMISD::VRSHRu; break; 7784 case Intrinsic::arm_neon_vrshiftn: 7785 VShiftOpc = ARMISD::VRSHRN; break; 7786 case Intrinsic::arm_neon_vqshifts: 7787 VShiftOpc = ARMISD::VQSHLs; break; 7788 case Intrinsic::arm_neon_vqshiftu: 7789 VShiftOpc = ARMISD::VQSHLu; break; 7790 case Intrinsic::arm_neon_vqshiftsu: 7791 VShiftOpc = ARMISD::VQSHLsu; break; 7792 case Intrinsic::arm_neon_vqshiftns: 7793 VShiftOpc = ARMISD::VQSHRNs; break; 7794 case Intrinsic::arm_neon_vqshiftnu: 7795 VShiftOpc = ARMISD::VQSHRNu; break; 7796 case Intrinsic::arm_neon_vqshiftnsu: 7797 VShiftOpc = ARMISD::VQSHRNsu; break; 7798 case Intrinsic::arm_neon_vqrshiftns: 7799 VShiftOpc = ARMISD::VQRSHRNs; break; 7800 case Intrinsic::arm_neon_vqrshiftnu: 7801 VShiftOpc = ARMISD::VQRSHRNu; break; 7802 case Intrinsic::arm_neon_vqrshiftnsu: 7803 VShiftOpc = ARMISD::VQRSHRNsu; break; 7804 } 7805 7806 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7807 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 7808 } 7809 7810 case Intrinsic::arm_neon_vshiftins: { 7811 EVT VT = N->getOperand(1).getValueType(); 7812 int64_t Cnt; 7813 unsigned VShiftOpc = 0; 7814 7815 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 7816 VShiftOpc = ARMISD::VSLI; 7817 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 7818 VShiftOpc = ARMISD::VSRI; 7819 else { 7820 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 7821 } 7822 7823 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7824 N->getOperand(1), N->getOperand(2), 7825 DAG.getConstant(Cnt, MVT::i32)); 7826 } 7827 7828 case Intrinsic::arm_neon_vqrshifts: 7829 case Intrinsic::arm_neon_vqrshiftu: 7830 // No immediate versions of these to check for. 7831 break; 7832 } 7833 7834 return SDValue(); 7835} 7836 7837/// PerformShiftCombine - Checks for immediate versions of vector shifts and 7838/// lowers them. As with the vector shift intrinsics, this is done during DAG 7839/// combining instead of DAG legalizing because the build_vectors for 64-bit 7840/// vector element shift counts are generally not legal, and it is hard to see 7841/// their values after they get legalized to loads from a constant pool. 7842static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 7843 const ARMSubtarget *ST) { 7844 EVT VT = N->getValueType(0); 7845 7846 // Nothing to be done for scalar shifts. 7847 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7848 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 7849 return SDValue(); 7850 7851 assert(ST->hasNEON() && "unexpected vector shift"); 7852 int64_t Cnt; 7853 7854 switch (N->getOpcode()) { 7855 default: llvm_unreachable("unexpected shift opcode"); 7856 7857 case ISD::SHL: 7858 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 7859 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 7860 DAG.getConstant(Cnt, MVT::i32)); 7861 break; 7862 7863 case ISD::SRA: 7864 case ISD::SRL: 7865 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 7866 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
7867 ARMISD::VSHRs : ARMISD::VSHRu); 7868 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 7869 DAG.getConstant(Cnt, MVT::i32)); 7870 } 7871 } 7872 return SDValue(); 7873} 7874 7875/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 7876/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 7877static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 7878 const ARMSubtarget *ST) { 7879 SDValue N0 = N->getOperand(0); 7880 7881 // Check for sign- and zero-extensions of vector extract operations of 8- 7882 // and 16-bit vector elements. NEON supports these directly. They are 7883 // handled during DAG combining because type legalization will promote them 7884 // to 32-bit types and it is messy to recognize the operations after that. 7885 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7886 SDValue Vec = N0.getOperand(0); 7887 SDValue Lane = N0.getOperand(1); 7888 EVT VT = N->getValueType(0); 7889 EVT EltVT = N0.getValueType(); 7890 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7891 7892 if (VT == MVT::i32 && 7893 (EltVT == MVT::i8 || EltVT == MVT::i16) && 7894 TLI.isTypeLegal(Vec.getValueType()) && 7895 isa<ConstantSDNode>(Lane)) { 7896 7897 unsigned Opc = 0; 7898 switch (N->getOpcode()) { 7899 default: llvm_unreachable("unexpected opcode"); 7900 case ISD::SIGN_EXTEND: 7901 Opc = ARMISD::VGETLANEs; 7902 break; 7903 case ISD::ZERO_EXTEND: 7904 case ISD::ANY_EXTEND: 7905 Opc = ARMISD::VGETLANEu; 7906 break; 7907 } 7908 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 7909 } 7910 } 7911 7912 return SDValue(); 7913} 7914 7915/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 7916/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 7917static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 7918 const ARMSubtarget *ST) { 7919 // If the target supports NEON, try to use vmax/vmin instructions for f32 7920 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 7921 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 7922 // a NaN; only do the transformation when it matches that behavior. 7923 7924 // For now only do this when using NEON for FP operations; if using VFP, it 7925 // is not obvious that the benefit outweighs the cost of switching to the 7926 // NEON pipeline. 7927 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 7928 N->getValueType(0) != MVT::f32) 7929 return SDValue(); 7930 7931 SDValue CondLHS = N->getOperand(0); 7932 SDValue CondRHS = N->getOperand(1); 7933 SDValue LHS = N->getOperand(2); 7934 SDValue RHS = N->getOperand(3); 7935 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 7936 7937 unsigned Opcode = 0; 7938 bool IsReversed; 7939 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 7940 IsReversed = false; // x CC y ? x : y 7941 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 7942 IsReversed = true ; // x CC y ? y : x 7943 } else { 7944 return SDValue(); 7945 } 7946 7947 bool IsUnordered; 7948 switch (CC) { 7949 default: break; 7950 case ISD::SETOLT: 7951 case ISD::SETOLE: 7952 case ISD::SETLT: 7953 case ISD::SETLE: 7954 case ISD::SETULT: 7955 case ISD::SETULE: 7956 // If LHS is NaN, an ordered comparison will be false and the result will 7957 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 7958 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
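// (For an unordered comparison such as SETULT, a NaN RHS makes the compare
// true and selects the LHS, but vmin(LHS, NaN) = NaN, so in that case it is
// the RHS that must be known to be non-NaN.)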
7959 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 7960 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7961 break; 7962 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 7963 // will return -0, so vmin can only be used for unsafe math or if one of 7964 // the operands is known to be nonzero. 7965 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 7966 !UnsafeFPMath && 7967 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7968 break; 7969 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 7970 break; 7971 7972 case ISD::SETOGT: 7973 case ISD::SETOGE: 7974 case ISD::SETGT: 7975 case ISD::SETGE: 7976 case ISD::SETUGT: 7977 case ISD::SETUGE: 7978 // If LHS is NaN, an ordered comparison will be false and the result will 7979 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 7980 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 7981 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 7982 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7983 break; 7984 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 7985 // will return +0, so vmax can only be used for unsafe math or if one of 7986 // the operands is known to be nonzero. 7987 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 7988 !UnsafeFPMath && 7989 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7990 break; 7991 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 7992 break; 7993 } 7994 7995 if (!Opcode) 7996 return SDValue(); 7997 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 7998} 7999 8000/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 8001SDValue 8002ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 8003 SDValue Cmp = N->getOperand(4); 8004 if (Cmp.getOpcode() != ARMISD::CMPZ) 8005 // Only looking at EQ and NE cases. 8006 return SDValue(); 8007 8008 EVT VT = N->getValueType(0); 8009 DebugLoc dl = N->getDebugLoc(); 8010 SDValue LHS = Cmp.getOperand(0); 8011 SDValue RHS = Cmp.getOperand(1); 8012 SDValue FalseVal = N->getOperand(0); 8013 SDValue TrueVal = N->getOperand(1); 8014 SDValue ARMcc = N->getOperand(2); 8015 ARMCC::CondCodes CC = 8016 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 8017 8018 // Simplify 8019 // mov r1, r0 8020 // cmp r1, x 8021 // mov r0, y 8022 // moveq r0, x 8023 // to 8024 // cmp r0, x 8025 // movne r0, y 8026 // 8027 // mov r1, r0 8028 // cmp r1, x 8029 // mov r0, x 8030 // movne r0, y 8031 // to 8032 // cmp r0, x 8033 // movne r0, y 8034 /// FIXME: Turn this into a target neutral optimization? 8035 SDValue Res; 8036 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 8037 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 8038 N->getOperand(3), Cmp); 8039 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 8040 SDValue ARMcc; 8041 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 8042 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 8043 N->getOperand(3), NewCmp); 8044 } 8045 8046 if (Res.getNode()) { 8047 APInt KnownZero, KnownOne; 8048 APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()); 8049 DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne); 8050 // Capture demanded bits information that would be otherwise lost. 
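// If the rewritten CMOV is known to produce only the low 1, 8 or 16 bits,
// wrap it in an AssertZext of the matching narrow type so that later
// zero-extensions of the result can still be folded away.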
8051 if (KnownZero == 0xfffffffe) 8052 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8053 DAG.getValueType(MVT::i1)); 8054 else if (KnownZero == 0xffffff00) 8055 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8056 DAG.getValueType(MVT::i8)); 8057 else if (KnownZero == 0xffff0000) 8058 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8059 DAG.getValueType(MVT::i16)); 8060 } 8061 8062 return Res; 8063} 8064 8065SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 8066 DAGCombinerInfo &DCI) const { 8067 switch (N->getOpcode()) { 8068 default: break; 8069 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 8070 case ISD::SUB: return PerformSUBCombine(N, DCI); 8071 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 8072 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 8073 case ISD::AND: return PerformANDCombine(N, DCI); 8074 case ARMISD::BFI: return PerformBFICombine(N, DCI); 8075 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 8076 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 8077 case ISD::STORE: return PerformSTORECombine(N, DCI); 8078 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 8079 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 8080 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 8081 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 8082 case ISD::FP_TO_SINT: 8083 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 8084 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 8085 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 8086 case ISD::SHL: 8087 case ISD::SRA: 8088 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 8089 case ISD::SIGN_EXTEND: 8090 case ISD::ZERO_EXTEND: 8091 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 8092 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 8093 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 8094 case ARMISD::VLD2DUP: 8095 case ARMISD::VLD3DUP: 8096 case ARMISD::VLD4DUP: 8097 return CombineBaseUpdate(N, DCI); 8098 case ISD::INTRINSIC_VOID: 8099 case ISD::INTRINSIC_W_CHAIN: 8100 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 8101 case Intrinsic::arm_neon_vld1: 8102 case Intrinsic::arm_neon_vld2: 8103 case Intrinsic::arm_neon_vld3: 8104 case Intrinsic::arm_neon_vld4: 8105 case Intrinsic::arm_neon_vld2lane: 8106 case Intrinsic::arm_neon_vld3lane: 8107 case Intrinsic::arm_neon_vld4lane: 8108 case Intrinsic::arm_neon_vst1: 8109 case Intrinsic::arm_neon_vst2: 8110 case Intrinsic::arm_neon_vst3: 8111 case Intrinsic::arm_neon_vst4: 8112 case Intrinsic::arm_neon_vst2lane: 8113 case Intrinsic::arm_neon_vst3lane: 8114 case Intrinsic::arm_neon_vst4lane: 8115 return CombineBaseUpdate(N, DCI); 8116 default: break; 8117 } 8118 break; 8119 } 8120 return SDValue(); 8121} 8122 8123bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 8124 EVT VT) const { 8125 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 8126} 8127 8128bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 8129 if (!Subtarget->allowsUnalignedMem()) 8130 return false; 8131 8132 switch (VT.getSimpleVT().SimpleTy) { 8133 default: 8134 return false; 8135 case MVT::i8: 8136 case MVT::i16: 8137 case MVT::i32: 8138 return true; 8139 // FIXME: VLD1 etc with standard alignment is legal. 
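// Vector types currently hit the conservative "return false" default, even
// though NEON loads and stores with element alignment would be acceptable.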
8140 } 8141} 8142 8143static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 8144 if (V < 0) 8145 return false; 8146 8147 unsigned Scale = 1; 8148 switch (VT.getSimpleVT().SimpleTy) { 8149 default: return false; 8150 case MVT::i1: 8151 case MVT::i8: 8152 // Scale == 1; 8153 break; 8154 case MVT::i16: 8155 // Scale == 2; 8156 Scale = 2; 8157 break; 8158 case MVT::i32: 8159 // Scale == 4; 8160 Scale = 4; 8161 break; 8162 } 8163 8164 if ((V & (Scale - 1)) != 0) 8165 return false; 8166 V /= Scale; 8167 return V == (V & ((1LL << 5) - 1)); 8168} 8169 8170static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 8171 const ARMSubtarget *Subtarget) { 8172 bool isNeg = false; 8173 if (V < 0) { 8174 isNeg = true; 8175 V = - V; 8176 } 8177 8178 switch (VT.getSimpleVT().SimpleTy) { 8179 default: return false; 8180 case MVT::i1: 8181 case MVT::i8: 8182 case MVT::i16: 8183 case MVT::i32: 8184 // + imm12 or - imm8 8185 if (isNeg) 8186 return V == (V & ((1LL << 8) - 1)); 8187 return V == (V & ((1LL << 12) - 1)); 8188 case MVT::f32: 8189 case MVT::f64: 8190 // Same as ARM mode. FIXME: NEON? 8191 if (!Subtarget->hasVFP2()) 8192 return false; 8193 if ((V & 3) != 0) 8194 return false; 8195 V >>= 2; 8196 return V == (V & ((1LL << 8) - 1)); 8197 } 8198} 8199 8200/// isLegalAddressImmediate - Return true if the integer value can be used 8201/// as the offset of the target addressing mode for load / store of the 8202/// given type. 8203static bool isLegalAddressImmediate(int64_t V, EVT VT, 8204 const ARMSubtarget *Subtarget) { 8205 if (V == 0) 8206 return true; 8207 8208 if (!VT.isSimple()) 8209 return false; 8210 8211 if (Subtarget->isThumb1Only()) 8212 return isLegalT1AddressImmediate(V, VT); 8213 else if (Subtarget->isThumb2()) 8214 return isLegalT2AddressImmediate(V, VT, Subtarget); 8215 8216 // ARM mode. 8217 if (V < 0) 8218 V = - V; 8219 switch (VT.getSimpleVT().SimpleTy) { 8220 default: return false; 8221 case MVT::i1: 8222 case MVT::i8: 8223 case MVT::i32: 8224 // +- imm12 8225 return V == (V & ((1LL << 12) - 1)); 8226 case MVT::i16: 8227 // +- imm8 8228 return V == (V & ((1LL << 8) - 1)); 8229 case MVT::f32: 8230 case MVT::f64: 8231 if (!Subtarget->hasVFP2()) // FIXME: NEON? 8232 return false; 8233 if ((V & 3) != 0) 8234 return false; 8235 V >>= 2; 8236 return V == (V & ((1LL << 8) - 1)); 8237 } 8238} 8239 8240bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 8241 EVT VT) const { 8242 int Scale = AM.Scale; 8243 if (Scale < 0) 8244 return false; 8245 8246 switch (VT.getSimpleVT().SimpleTy) { 8247 default: return false; 8248 case MVT::i1: 8249 case MVT::i8: 8250 case MVT::i16: 8251 case MVT::i32: 8252 if (Scale == 1) 8253 return true; 8254 // r + r << imm 8255 Scale = Scale & ~1; 8256 return Scale == 2 || Scale == 4 || Scale == 8; 8257 case MVT::i64: 8258 // r + r 8259 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8260 return true; 8261 return false; 8262 case MVT::isVoid: 8263 // Note, we allow "void" uses (basically, uses that aren't loads or 8264 // stores), because arm allows folding a scale into many arithmetic 8265 // operations. This should be made more precise and revisited later. 8266 8267 // Allow r << imm, but the imm has to be a multiple of two. 8268 if (Scale & 1) return false; 8269 return isPowerOf2_32(Scale); 8270 } 8271} 8272 8273/// isLegalAddressingMode - Return true if the addressing mode represented 8274/// by AM is legal for this target, for a load/store of the specified type. 
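/// For example, "r + (r << 2)" is accepted for an i32 access in ARM and
/// Thumb2 mode, while a global base or anything of the form
/// "r + r*scale + imm" is rejected.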
8275bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 8276 Type *Ty) const { 8277 EVT VT = getValueType(Ty, true); 8278 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 8279 return false; 8280 8281 // Can never fold addr of global into load/store. 8282 if (AM.BaseGV) 8283 return false; 8284 8285 switch (AM.Scale) { 8286 case 0: // no scale reg, must be "r+i" or "r", or "i". 8287 break; 8288 case 1: 8289 if (Subtarget->isThumb1Only()) 8290 return false; 8291 // FALL THROUGH. 8292 default: 8293 // ARM doesn't support any R+R*scale+imm addr modes. 8294 if (AM.BaseOffs) 8295 return false; 8296 8297 if (!VT.isSimple()) 8298 return false; 8299 8300 if (Subtarget->isThumb2()) 8301 return isLegalT2ScaledAddressingMode(AM, VT); 8302 8303 int Scale = AM.Scale; 8304 switch (VT.getSimpleVT().SimpleTy) { 8305 default: return false; 8306 case MVT::i1: 8307 case MVT::i8: 8308 case MVT::i32: 8309 if (Scale < 0) Scale = -Scale; 8310 if (Scale == 1) 8311 return true; 8312 // r + r << imm 8313 return isPowerOf2_32(Scale & ~1); 8314 case MVT::i16: 8315 case MVT::i64: 8316 // r + r 8317 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8318 return true; 8319 return false; 8320 8321 case MVT::isVoid: 8322 // Note, we allow "void" uses (basically, uses that aren't loads or 8323 // stores), because arm allows folding a scale into many arithmetic 8324 // operations. This should be made more precise and revisited later. 8325 8326 // Allow r << imm, but the imm has to be a multiple of two. 8327 if (Scale & 1) return false; 8328 return isPowerOf2_32(Scale); 8329 } 8330 break; 8331 } 8332 return true; 8333} 8334 8335/// isLegalICmpImmediate - Return true if the specified immediate is legal 8336/// icmp immediate, that is the target has icmp instructions which can compare 8337/// a register against the immediate without having to materialize the 8338/// immediate into a register. 8339bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 8340 if (!Subtarget->isThumb()) 8341 return ARM_AM::getSOImmVal(Imm) != -1; 8342 if (Subtarget->isThumb2()) 8343 return ARM_AM::getT2SOImmVal(Imm) != -1; 8344 return Imm >= 0 && Imm <= 255; 8345} 8346 8347/// isLegalAddImmediate - Return true if the specified immediate is legal 8348/// add immediate, that is the target has add instructions which can add 8349/// a register with the immediate without having to materialize the 8350/// immediate into a register. 
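/// Legal values are the ARM "modified immediates": an 8-bit value rotated
/// right by an even amount. For example, 0xFF0000 is legal but 0x101 is not.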
8351bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 8352 return ARM_AM::getSOImmVal(Imm) != -1; 8353} 8354 8355static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 8356 bool isSEXTLoad, SDValue &Base, 8357 SDValue &Offset, bool &isInc, 8358 SelectionDAG &DAG) { 8359 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8360 return false; 8361 8362 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 8363 // AddressingMode 3 8364 Base = Ptr->getOperand(0); 8365 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8366 int RHSC = (int)RHS->getZExtValue(); 8367 if (RHSC < 0 && RHSC > -256) { 8368 assert(Ptr->getOpcode() == ISD::ADD); 8369 isInc = false; 8370 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8371 return true; 8372 } 8373 } 8374 isInc = (Ptr->getOpcode() == ISD::ADD); 8375 Offset = Ptr->getOperand(1); 8376 return true; 8377 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 8378 // AddressingMode 2 8379 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8380 int RHSC = (int)RHS->getZExtValue(); 8381 if (RHSC < 0 && RHSC > -0x1000) { 8382 assert(Ptr->getOpcode() == ISD::ADD); 8383 isInc = false; 8384 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8385 Base = Ptr->getOperand(0); 8386 return true; 8387 } 8388 } 8389 8390 if (Ptr->getOpcode() == ISD::ADD) { 8391 isInc = true; 8392 ARM_AM::ShiftOpc ShOpcVal= 8393 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 8394 if (ShOpcVal != ARM_AM::no_shift) { 8395 Base = Ptr->getOperand(1); 8396 Offset = Ptr->getOperand(0); 8397 } else { 8398 Base = Ptr->getOperand(0); 8399 Offset = Ptr->getOperand(1); 8400 } 8401 return true; 8402 } 8403 8404 isInc = (Ptr->getOpcode() == ISD::ADD); 8405 Base = Ptr->getOperand(0); 8406 Offset = Ptr->getOperand(1); 8407 return true; 8408 } 8409 8410 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 8411 return false; 8412} 8413 8414static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 8415 bool isSEXTLoad, SDValue &Base, 8416 SDValue &Offset, bool &isInc, 8417 SelectionDAG &DAG) { 8418 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8419 return false; 8420 8421 Base = Ptr->getOperand(0); 8422 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8423 int RHSC = (int)RHS->getZExtValue(); 8424 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 8425 assert(Ptr->getOpcode() == ISD::ADD); 8426 isInc = false; 8427 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8428 return true; 8429 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 8430 isInc = Ptr->getOpcode() == ISD::ADD; 8431 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 8432 return true; 8433 } 8434 } 8435 8436 return false; 8437} 8438 8439/// getPreIndexedAddressParts - returns true by value, base pointer and 8440/// offset pointer and addressing mode by reference if the node's address 8441/// can be legally represented as pre-indexed load / store address. 
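/// For example, an i32 load from "add r0, #4" can become the pre-indexed
/// "ldr r1, [r0, #4]!", which updates r0 with the new address as a side
/// effect.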
8442bool 8443ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 8444 SDValue &Offset, 8445 ISD::MemIndexedMode &AM, 8446 SelectionDAG &DAG) const { 8447 if (Subtarget->isThumb1Only()) 8448 return false; 8449 8450 EVT VT; 8451 SDValue Ptr; 8452 bool isSEXTLoad = false; 8453 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8454 Ptr = LD->getBasePtr(); 8455 VT = LD->getMemoryVT(); 8456 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8457 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8458 Ptr = ST->getBasePtr(); 8459 VT = ST->getMemoryVT(); 8460 } else 8461 return false; 8462 8463 bool isInc; 8464 bool isLegal = false; 8465 if (Subtarget->isThumb2()) 8466 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8467 Offset, isInc, DAG); 8468 else 8469 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8470 Offset, isInc, DAG); 8471 if (!isLegal) 8472 return false; 8473 8474 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 8475 return true; 8476} 8477 8478/// getPostIndexedAddressParts - returns true by value, base pointer and 8479/// offset pointer and addressing mode by reference if this node can be 8480/// combined with a load / store to form a post-indexed load / store. 8481bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 8482 SDValue &Base, 8483 SDValue &Offset, 8484 ISD::MemIndexedMode &AM, 8485 SelectionDAG &DAG) const { 8486 if (Subtarget->isThumb1Only()) 8487 return false; 8488 8489 EVT VT; 8490 SDValue Ptr; 8491 bool isSEXTLoad = false; 8492 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8493 VT = LD->getMemoryVT(); 8494 Ptr = LD->getBasePtr(); 8495 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8496 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8497 VT = ST->getMemoryVT(); 8498 Ptr = ST->getBasePtr(); 8499 } else 8500 return false; 8501 8502 bool isInc; 8503 bool isLegal = false; 8504 if (Subtarget->isThumb2()) 8505 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8506 isInc, DAG); 8507 else 8508 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8509 isInc, DAG); 8510 if (!isLegal) 8511 return false; 8512 8513 if (Ptr != Base) { 8514 // Swap base ptr and offset to catch more post-index load / store when 8515 // it's legal. In Thumb2 mode, offset must be an immediate. 8516 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 8517 !Subtarget->isThumb2()) 8518 std::swap(Base, Offset); 8519 8520 // Post-indexed load / store update the base pointer. 8521 if (Ptr != Base) 8522 return false; 8523 } 8524 8525 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 8526 return true; 8527} 8528 8529void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 8530 const APInt &Mask, 8531 APInt &KnownZero, 8532 APInt &KnownOne, 8533 const SelectionDAG &DAG, 8534 unsigned Depth) const { 8535 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 8536 switch (Op.getOpcode()) { 8537 default: break; 8538 case ARMISD::CMOV: { 8539 // Bits are known zero/one if known on the LHS and RHS. 
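// A CMOV yields one of its first two operands, so a bit is known only if it
// is known to have the same value in both; intersect the two known-bit masks.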
8540 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 8541 if (KnownZero == 0 && KnownOne == 0) return; 8542 8543 APInt KnownZeroRHS, KnownOneRHS; 8544 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 8545 KnownZeroRHS, KnownOneRHS, Depth+1); 8546 KnownZero &= KnownZeroRHS; 8547 KnownOne &= KnownOneRHS; 8548 return; 8549 } 8550 } 8551} 8552 8553//===----------------------------------------------------------------------===// 8554// ARM Inline Assembly Support 8555//===----------------------------------------------------------------------===// 8556 8557bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 8558 // Looking for "rev" which is V6+. 8559 if (!Subtarget->hasV6Ops()) 8560 return false; 8561 8562 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 8563 std::string AsmStr = IA->getAsmString(); 8564 SmallVector<StringRef, 4> AsmPieces; 8565 SplitString(AsmStr, AsmPieces, ";\n"); 8566 8567 switch (AsmPieces.size()) { 8568 default: return false; 8569 case 1: 8570 AsmStr = AsmPieces[0]; 8571 AsmPieces.clear(); 8572 SplitString(AsmStr, AsmPieces, " \t,"); 8573 8574 // rev $0, $1 8575 if (AsmPieces.size() == 3 && 8576 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 8577 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 8578 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 8579 if (Ty && Ty->getBitWidth() == 32) 8580 return IntrinsicLowering::LowerToByteSwap(CI); 8581 } 8582 break; 8583 } 8584 8585 return false; 8586} 8587 8588/// getConstraintType - Given a constraint letter, return the type of 8589/// constraint it is for this target. 8590ARMTargetLowering::ConstraintType 8591ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 8592 if (Constraint.size() == 1) { 8593 switch (Constraint[0]) { 8594 default: break; 8595 case 'l': return C_RegisterClass; 8596 case 'w': return C_RegisterClass; 8597 case 'h': return C_RegisterClass; 8598 case 'x': return C_RegisterClass; 8599 case 't': return C_RegisterClass; 8600 case 'j': return C_Other; // Constant for movw. 8601 // An address with a single base register. Due to the way we 8602 // currently handle addresses it is the same as an 'r' memory constraint. 8603 case 'Q': return C_Memory; 8604 } 8605 } else if (Constraint.size() == 2) { 8606 switch (Constraint[0]) { 8607 default: break; 8608 // All 'U+' constraints are addresses. 8609 case 'U': return C_Memory; 8610 } 8611 } 8612 return TargetLowering::getConstraintType(Constraint); 8613} 8614 8615/// Examine constraint type and operand type and determine a weight value. 8616/// This object must already have been set up with the operand type 8617/// and the current alternative constraint selected. 8618TargetLowering::ConstraintWeight 8619ARMTargetLowering::getSingleConstraintMatchWeight( 8620 AsmOperandInfo &info, const char *constraint) const { 8621 ConstraintWeight weight = CW_Invalid; 8622 Value *CallOperandVal = info.CallOperandVal; 8623 // If we don't have a value, we can't do a match, 8624 // but allow it at the lowest weight. 8625 if (CallOperandVal == NULL) 8626 return CW_Default; 8627 Type *type = CallOperandVal->getType(); 8628 // Look at the constraint type. 
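// 'l' asks for the low registers (R0-R7), which matter in Thumb mode because
// most 16-bit instructions can only encode those; 'w' covers the VFP/NEON
// register files and is only meaningful for floating-point operands here.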
8629 switch (*constraint) { 8630 default: 8631 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 8632 break; 8633 case 'l': 8634 if (type->isIntegerTy()) { 8635 if (Subtarget->isThumb()) 8636 weight = CW_SpecificReg; 8637 else 8638 weight = CW_Register; 8639 } 8640 break; 8641 case 'w': 8642 if (type->isFloatingPointTy()) 8643 weight = CW_Register; 8644 break; 8645 } 8646 return weight; 8647} 8648 8649typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 8650RCPair 8651ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 8652 EVT VT) const { 8653 if (Constraint.size() == 1) { 8654 // GCC ARM Constraint Letters 8655 switch (Constraint[0]) { 8656 case 'l': // Low regs or general regs. 8657 if (Subtarget->isThumb()) 8658 return RCPair(0U, ARM::tGPRRegisterClass); 8659 else 8660 return RCPair(0U, ARM::GPRRegisterClass); 8661 case 'h': // High regs or no regs. 8662 if (Subtarget->isThumb()) 8663 return RCPair(0U, ARM::hGPRRegisterClass); 8664 break; 8665 case 'r': 8666 return RCPair(0U, ARM::GPRRegisterClass); 8667 case 'w': 8668 if (VT == MVT::f32) 8669 return RCPair(0U, ARM::SPRRegisterClass); 8670 if (VT.getSizeInBits() == 64) 8671 return RCPair(0U, ARM::DPRRegisterClass); 8672 if (VT.getSizeInBits() == 128) 8673 return RCPair(0U, ARM::QPRRegisterClass); 8674 break; 8675 case 'x': 8676 if (VT == MVT::f32) 8677 return RCPair(0U, ARM::SPR_8RegisterClass); 8678 if (VT.getSizeInBits() == 64) 8679 return RCPair(0U, ARM::DPR_8RegisterClass); 8680 if (VT.getSizeInBits() == 128) 8681 return RCPair(0U, ARM::QPR_8RegisterClass); 8682 break; 8683 case 't': 8684 if (VT == MVT::f32) 8685 return RCPair(0U, ARM::SPRRegisterClass); 8686 break; 8687 } 8688 } 8689 if (StringRef("{cc}").equals_lower(Constraint)) 8690 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 8691 8692 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8693} 8694 8695/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8696/// vector. If it is invalid, don't add anything to Ops. 8697void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8698 std::string &Constraint, 8699 std::vector<SDValue>&Ops, 8700 SelectionDAG &DAG) const { 8701 SDValue Result(0, 0); 8702 8703 // Currently only support length 1 constraints. 8704 if (Constraint.length() != 1) return; 8705 8706 char ConstraintLetter = Constraint[0]; 8707 switch (ConstraintLetter) { 8708 default: break; 8709 case 'j': 8710 case 'I': case 'J': case 'K': case 'L': 8711 case 'M': case 'N': case 'O': 8712 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 8713 if (!C) 8714 return; 8715 8716 int64_t CVal64 = C->getSExtValue(); 8717 int CVal = (int) CVal64; 8718 // None of these constraints allow values larger than 32 bits. Check 8719 // that the value fits in an int. 8720 if (CVal != CVal64) 8721 return; 8722 8723 switch (ConstraintLetter) { 8724 case 'j': 8725 // Constant suitable for movw, must be between 0 and 8726 // 65535. 8727 if (Subtarget->hasV6T2Ops()) 8728 if (CVal >= 0 && CVal <= 65535) 8729 break; 8730 return; 8731 case 'I': 8732 if (Subtarget->isThumb1Only()) { 8733 // This must be a constant between 0 and 255, for ADD 8734 // immediates. 8735 if (CVal >= 0 && CVal <= 255) 8736 break; 8737 } else if (Subtarget->isThumb2()) { 8738 // A constant that can be used as an immediate value in a 8739 // data-processing instruction. 
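// (Thumb2 modified immediates: an 8-bit value, optionally replicated as
// 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY, or rotated into the high part of
// the word.)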
8740 if (ARM_AM::getT2SOImmVal(CVal) != -1) 8741 break; 8742 } else { 8743 // A constant that can be used as an immediate value in a 8744 // data-processing instruction. 8745 if (ARM_AM::getSOImmVal(CVal) != -1) 8746 break; 8747 } 8748 return; 8749 8750 case 'J': 8751 if (Subtarget->isThumb()) { // FIXME thumb2 8752 // This must be a constant between -255 and -1, for negated ADD 8753 // immediates. This can be used in GCC with an "n" modifier that 8754 // prints the negated value, for use with SUB instructions. It is 8755 // not useful otherwise but is implemented for compatibility. 8756 if (CVal >= -255 && CVal <= -1) 8757 break; 8758 } else { 8759 // This must be a constant between -4095 and 4095. It is not clear 8760 // what this constraint is intended for. Implemented for 8761 // compatibility with GCC. 8762 if (CVal >= -4095 && CVal <= 4095) 8763 break; 8764 } 8765 return; 8766 8767 case 'K': 8768 if (Subtarget->isThumb1Only()) { 8769 // A 32-bit value where only one byte has a nonzero value. Exclude 8770 // zero to match GCC. This constraint is used by GCC internally for 8771 // constants that can be loaded with a move/shift combination. 8772 // It is not useful otherwise but is implemented for compatibility. 8773 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 8774 break; 8775 } else if (Subtarget->isThumb2()) { 8776 // A constant whose bitwise inverse can be used as an immediate 8777 // value in a data-processing instruction. This can be used in GCC 8778 // with a "B" modifier that prints the inverted value, for use with 8779 // BIC and MVN instructions. It is not useful otherwise but is 8780 // implemented for compatibility. 8781 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 8782 break; 8783 } else { 8784 // A constant whose bitwise inverse can be used as an immediate 8785 // value in a data-processing instruction. This can be used in GCC 8786 // with a "B" modifier that prints the inverted value, for use with 8787 // BIC and MVN instructions. It is not useful otherwise but is 8788 // implemented for compatibility. 8789 if (ARM_AM::getSOImmVal(~CVal) != -1) 8790 break; 8791 } 8792 return; 8793 8794 case 'L': 8795 if (Subtarget->isThumb1Only()) { 8796 // This must be a constant between -7 and 7, 8797 // for 3-operand ADD/SUB immediate instructions. 8798 if (CVal >= -7 && CVal < 7) 8799 break; 8800 } else if (Subtarget->isThumb2()) { 8801 // A constant whose negation can be used as an immediate value in a 8802 // data-processing instruction. This can be used in GCC with an "n" 8803 // modifier that prints the negated value, for use with SUB 8804 // instructions. It is not useful otherwise but is implemented for 8805 // compatibility. 8806 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 8807 break; 8808 } else { 8809 // A constant whose negation can be used as an immediate value in a 8810 // data-processing instruction. This can be used in GCC with an "n" 8811 // modifier that prints the negated value, for use with SUB 8812 // instructions. It is not useful otherwise but is implemented for 8813 // compatibility. 8814 if (ARM_AM::getSOImmVal(-CVal) != -1) 8815 break; 8816 } 8817 return; 8818 8819 case 'M': 8820 if (Subtarget->isThumb()) { // FIXME thumb2 8821 // This must be a multiple of 4 between 0 and 1020, for 8822 // ADD sp + immediate. 8823 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 8824 break; 8825 } else { 8826 // A power of two or a constant between 0 and 32. 
This is used in 8827 // GCC for the shift amount on shifted register operands, but it is 8828 // useful in general for any shift amounts. 8829 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 8830 break; 8831 } 8832 return; 8833 8834 case 'N': 8835 if (Subtarget->isThumb()) { // FIXME thumb2 8836 // This must be a constant between 0 and 31, for shift amounts. 8837 if (CVal >= 0 && CVal <= 31) 8838 break; 8839 } 8840 return; 8841 8842 case 'O': 8843 if (Subtarget->isThumb()) { // FIXME thumb2 8844 // This must be a multiple of 4 between -508 and 508, for 8845 // ADD/SUB sp = sp + immediate. 8846 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 8847 break; 8848 } 8849 return; 8850 } 8851 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 8852 break; 8853 } 8854 8855 if (Result.getNode()) { 8856 Ops.push_back(Result); 8857 return; 8858 } 8859 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8860} 8861 8862bool 8863ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 8864 // The ARM target isn't yet aware of offsets. 8865 return false; 8866} 8867 8868bool ARM::isBitFieldInvertedMask(unsigned v) { 8869 if (v == 0xffffffff) 8870 return 0; 8871 // there can be 1's on either or both "outsides", all the "inside" 8872 // bits must be 0's 8873 unsigned int lsb = 0, msb = 31; 8874 while (v & (1 << msb)) --msb; 8875 while (v & (1 << lsb)) ++lsb; 8876 for (unsigned int i = lsb; i <= msb; ++i) { 8877 if (v & (1 << i)) 8878 return 0; 8879 } 8880 return 1; 8881} 8882 8883/// isFPImmLegal - Returns true if the target can instruction select the 8884/// specified FP immediate natively. If false, the legalizer will 8885/// materialize the FP immediate as a load from a constant pool. 8886bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 8887 if (!Subtarget->hasVFP3()) 8888 return false; 8889 if (VT == MVT::f32) 8890 return ARM_AM::getFP32Imm(Imm) != -1; 8891 if (VT == MVT::f64) 8892 return ARM_AM::getFP64Imm(Imm) != -1; 8893 return false; 8894} 8895 8896/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 8897/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 8898/// specified in the intrinsic calls. 8899bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 8900 const CallInst &I, 8901 unsigned Intrinsic) const { 8902 switch (Intrinsic) { 8903 case Intrinsic::arm_neon_vld1: 8904 case Intrinsic::arm_neon_vld2: 8905 case Intrinsic::arm_neon_vld3: 8906 case Intrinsic::arm_neon_vld4: 8907 case Intrinsic::arm_neon_vld2lane: 8908 case Intrinsic::arm_neon_vld3lane: 8909 case Intrinsic::arm_neon_vld4lane: { 8910 Info.opc = ISD::INTRINSIC_W_CHAIN; 8911 // Conservatively set memVT to the entire set of vectors loaded. 
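// For example, a vld3 of <4 x i32> returns three 16-byte vectors, so memVT
// becomes v6i64 (48 bytes); only the total size of the access matters here,
// not the element type.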
8912 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 8913 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8914 Info.ptrVal = I.getArgOperand(0); 8915 Info.offset = 0; 8916 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8917 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8918 Info.vol = false; // volatile loads with NEON intrinsics not supported 8919 Info.readMem = true; 8920 Info.writeMem = false; 8921 return true; 8922 } 8923 case Intrinsic::arm_neon_vst1: 8924 case Intrinsic::arm_neon_vst2: 8925 case Intrinsic::arm_neon_vst3: 8926 case Intrinsic::arm_neon_vst4: 8927 case Intrinsic::arm_neon_vst2lane: 8928 case Intrinsic::arm_neon_vst3lane: 8929 case Intrinsic::arm_neon_vst4lane: { 8930 Info.opc = ISD::INTRINSIC_VOID; 8931 // Conservatively set memVT to the entire set of vectors stored. 8932 unsigned NumElts = 0; 8933 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 8934 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 8935 if (!ArgTy->isVectorTy()) 8936 break; 8937 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 8938 } 8939 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8940 Info.ptrVal = I.getArgOperand(0); 8941 Info.offset = 0; 8942 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8943 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8944 Info.vol = false; // volatile stores with NEON intrinsics not supported 8945 Info.readMem = false; 8946 Info.writeMem = true; 8947 return true; 8948 } 8949 case Intrinsic::arm_strexd: { 8950 Info.opc = ISD::INTRINSIC_W_CHAIN; 8951 Info.memVT = MVT::i64; 8952 Info.ptrVal = I.getArgOperand(2); 8953 Info.offset = 0; 8954 Info.align = 8; 8955 Info.vol = true; 8956 Info.readMem = false; 8957 Info.writeMem = true; 8958 return true; 8959 } 8960 case Intrinsic::arm_ldrexd: { 8961 Info.opc = ISD::INTRINSIC_W_CHAIN; 8962 Info.memVT = MVT::i64; 8963 Info.ptrVal = I.getArgOperand(0); 8964 Info.offset = 0; 8965 Info.align = 8; 8966 Info.vol = true; 8967 Info.readMem = true; 8968 Info.writeMem = false; 8969 return true; 8970 } 8971 default: 8972 break; 8973 } 8974 8975 return false; 8976} 8977