ARMISelLowering.cpp revision 217f0e9ca494a1752c591f50f04b4143eb1763c5
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
                   cl::desc("Generate tail calls (TEMPORARY OPTION)."),
                   cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
                   cl::desc("Generate calls via indirect call instructions"),
                   cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
"__aeabi_fcmpun"); 331 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 332 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 333 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 334 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 335 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 342 343 // Floating-point to integer conversions. 344 // RTABI chapter 4.1.2, Table 6 345 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 346 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 347 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 348 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 349 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 350 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 351 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 352 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 353 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 354 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 355 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 356 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 357 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 358 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 359 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 360 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 361 362 // Conversions between floating types. 363 // RTABI chapter 4.1.2, Table 7 364 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 365 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 366 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 367 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 368 369 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY,  "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET,  "__aeabi_memset");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL,   MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM,    MVT::i32, Expand);
  setOperationAction(ISD::UREM,    MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress,       MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,        MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress,    MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,        MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,      MVT::Other, Custom);
  setOperationAction(ISD::VAARG,        MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,       MVT::Other, Expand);
  setOperationAction(ISD::VAEND,        MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,  MVT::i32,   Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32,  Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER,   MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP,     MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER,       MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,  MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,  MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN:    return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::ADDC:          return "ARMISD::ADDC";
  case ARMISD::ADDE:          return "ARMISD::ADDE";
  case ARMISD::SUBC:          return "ARMISD::SUBC";
  case ARMISD::SUBE:          return "ARMISD::SUBE";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCEQZ:         return "ARMISD::VCEQZ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEZ:         return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ:         return "ARMISD::VCLEZ";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTZ:         return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ:         return "ARMISD::VCLTZ";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
"ARMISD::VSHRu"; 875 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 876 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 877 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 878 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 879 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 880 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 881 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 882 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 883 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 884 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 885 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 886 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 887 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 888 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 889 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 890 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 891 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 892 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 893 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 894 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 895 case ARMISD::VDUP: return "ARMISD::VDUP"; 896 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 897 case ARMISD::VEXT: return "ARMISD::VEXT"; 898 case ARMISD::VREV64: return "ARMISD::VREV64"; 899 case ARMISD::VREV32: return "ARMISD::VREV32"; 900 case ARMISD::VREV16: return "ARMISD::VREV16"; 901 case ARMISD::VZIP: return "ARMISD::VZIP"; 902 case ARMISD::VUZP: return "ARMISD::VUZP"; 903 case ARMISD::VTRN: return "ARMISD::VTRN"; 904 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 905 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 906 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 907 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 908 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 909 case ARMISD::FMAX: return "ARMISD::FMAX"; 910 case ARMISD::FMIN: return "ARMISD::FMIN"; 911 case ARMISD::BFI: return "ARMISD::BFI"; 912 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 913 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 914 case ARMISD::VBSL: return "ARMISD::VBSL"; 915 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 916 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 917 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 918 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 919 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 920 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 921 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 922 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 923 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 924 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 925 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 926 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 927 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 928 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 929 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 930 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 931 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 932 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 933 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 934 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 935 } 936} 937 938EVT ARMTargetLowering::getSetCCResultType(EVT VT) const { 939 if (!VT.isVector()) return getPointerTy(); 940 return VT.changeVectorElementTypeToInteger(); 941} 942 943/// getRegClassFor - Return the register class that should be used for the 944/// specified value type. 
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
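/// Some FP conditions (e.g. SETONE, SETUEQ) cannot be tested with a single
/// ARM predicate, so a second condition code is returned in CondCode2; it is
/// set to ARMCC::AL when only the first check is needed.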
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
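  // Note: f64 results (and each f64 half of a v2f64) come back as pairs of
  // i32 registers; the needsCustom() path below reassembles them with
  // ARMISD::VMOVDRR and, for v2f64, INSERT_VECTOR_ELT.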
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
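/// The stack slot address is computed as SP plus the location's
/// LocMemOffset, as recorded by the calling-convention analysis.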
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
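      // When split, the leading words are loaded into the remaining argument
      // registers (up to r3) by the loop below, and whatever is left is
      // copied onto the outgoing stack area with an inline memcpy.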
1320 if (CCInfo.isFirstByValRegValid()) { 1321 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1322 unsigned int i, j; 1323 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1324 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1325 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1326 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1327 MachinePointerInfo(), 1328 false, false, 0); 1329 MemOpChains.push_back(Load.getValue(1)); 1330 RegsToPass.push_back(std::make_pair(j, Load)); 1331 } 1332 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1333 CCInfo.clearFirstByValReg(); 1334 } 1335 1336 unsigned LocMemOffset = VA.getLocMemOffset(); 1337 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1338 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1339 StkPtrOff); 1340 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1341 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1342 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1343 MVT::i32); 1344 // TODO: Disable AlwaysInline when it becomes possible 1345 // to emit a nested call sequence. 1346 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1347 Flags.getByValAlign(), 1348 /*isVolatile=*/false, 1349 /*AlwaysInline=*/true, 1350 MachinePointerInfo(0), 1351 MachinePointerInfo(0))); 1352 1353 } else if (!IsSibCall) { 1354 assert(VA.isMemLoc()); 1355 1356 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1357 dl, DAG, VA, Flags)); 1358 } 1359 } 1360 1361 if (!MemOpChains.empty()) 1362 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1363 &MemOpChains[0], MemOpChains.size()); 1364 1365 // Build a sequence of copy-to-reg nodes chained together with token chain 1366 // and flag operands which copy the outgoing args into the appropriate regs. 1367 SDValue InFlag; 1368 // Tail call byval lowering might overwrite argument registers so in case of 1369 // tail call optimization the copies to registers are lowered later. 1370 if (!isTailCall) 1371 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1372 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1373 RegsToPass[i].second, InFlag); 1374 InFlag = Chain.getValue(1); 1375 } 1376 1377 // For tail calls lower the arguments to the 'real' stack slot. 1378 if (isTailCall) { 1379 // Force all the incoming stack arguments to be loaded from the stack 1380 // before any new outgoing arguments are stored to the stack, because the 1381 // outgoing stack slots may alias the incoming argument stack slots, and 1382 // the alias isn't otherwise explicit. This is slightly more conservative 1383 // than necessary, because it means that each store effectively depends 1384 // on every argument instead of just those arguments it would clobber. 1385 1386 // Do not flag preceding copytoreg stuff together with the following stuff. 1387 InFlag = SDValue(); 1388 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1389 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1390 RegsToPass[i].second, InFlag); 1391 InFlag = Chain.getValue(1); 1392 } 1393 InFlag =SDValue(); 1394 } 1395 1396 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1397 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1398 // node so that legalize doesn't hack it. 
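// The flags computed below summarize what is known about the callee:
// isDirect is set when the callee is a known symbol rather than a function
// pointer, isARMFunc roughly tracks whether the target may be ARM-state
// code (or a Darwin stub) and influences the call opcode chosen later, and
// isLocalARMFunc marks an ARM-mode call to a function defined in this
// module, which may be predicated.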
1399 bool isDirect = false; 1400 bool isARMFunc = false; 1401 bool isLocalARMFunc = false; 1402 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1403 1404 if (EnableARMLongCalls) { 1405 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1406 && "long-calls with non-static relocation model!"); 1407 // Handle a global address or an external symbol. If it's not one of 1408 // those, the target's already in a register, so we don't need to do 1409 // anything extra. 1410 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1411 const GlobalValue *GV = G->getGlobal(); 1412 // Create a constant pool entry for the callee address 1413 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1414 ARMConstantPoolValue *CPV = 1415 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1416 1417 // Get the address of the callee into a register 1418 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1419 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1420 Callee = DAG.getLoad(getPointerTy(), dl, 1421 DAG.getEntryNode(), CPAddr, 1422 MachinePointerInfo::getConstantPool(), 1423 false, false, 0); 1424 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1425 const char *Sym = S->getSymbol(); 1426 1427 // Create a constant pool entry for the callee address 1428 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1429 ARMConstantPoolValue *CPV = 1430 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1431 ARMPCLabelIndex, 0); 1432 // Get the address of the callee into a register 1433 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1434 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1435 Callee = DAG.getLoad(getPointerTy(), dl, 1436 DAG.getEntryNode(), CPAddr, 1437 MachinePointerInfo::getConstantPool(), 1438 false, false, 0); 1439 } 1440 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1441 const GlobalValue *GV = G->getGlobal(); 1442 isDirect = true; 1443 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1444 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1445 getTargetMachine().getRelocationModel() != Reloc::Static; 1446 isARMFunc = !Subtarget->isThumb() || isStub; 1447 // ARM call to a local ARM function is predicable. 1448 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1449 // tBX takes a register source operand. 
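// For a Thumb1-only caller without BLX (pre-v5T), the branch below
// materializes the ARM-state callee's address from the constant pool and
// applies a PIC adjustment, roughly:
//   ldr r3, .LCPIn_m   @ callee address from the constant pool
//   add r3, pc         @ ARMISD::PIC_ADD
// and the call is then made through that register.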
1450 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1451 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1452 ARMConstantPoolValue *CPV = 1453 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1454 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1455 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1456 Callee = DAG.getLoad(getPointerTy(), dl, 1457 DAG.getEntryNode(), CPAddr, 1458 MachinePointerInfo::getConstantPool(), 1459 false, false, 0); 1460 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1461 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1462 getPointerTy(), Callee, PICLabel); 1463 } else { 1464 // On ELF targets for PIC code, direct calls should go through the PLT 1465 unsigned OpFlags = 0; 1466 if (Subtarget->isTargetELF() && 1467 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1468 OpFlags = ARMII::MO_PLT; 1469 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1470 } 1471 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1472 isDirect = true; 1473 bool isStub = Subtarget->isTargetDarwin() && 1474 getTargetMachine().getRelocationModel() != Reloc::Static; 1475 isARMFunc = !Subtarget->isThumb() || isStub; 1476 // tBX takes a register source operand. 1477 const char *Sym = S->getSymbol(); 1478 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1479 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1480 ARMConstantPoolValue *CPV = 1481 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1482 ARMPCLabelIndex, 4); 1483 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1484 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1485 Callee = DAG.getLoad(getPointerTy(), dl, 1486 DAG.getEntryNode(), CPAddr, 1487 MachinePointerInfo::getConstantPool(), 1488 false, false, 0); 1489 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1490 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1491 getPointerTy(), Callee, PICLabel); 1492 } else { 1493 unsigned OpFlags = 0; 1494 // On ELF targets for PIC code, direct calls should go through the PLT 1495 if (Subtarget->isTargetELF() && 1496 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1497 OpFlags = ARMII::MO_PLT; 1498 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1499 } 1500 } 1501 1502 // FIXME: handle tail calls differently. 1503 unsigned CallOpc; 1504 if (Subtarget->isThumb()) { 1505 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1506 CallOpc = ARMISD::CALL_NOLINK; 1507 else 1508 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1509 } else { 1510 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1511 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1512 : ARMISD::CALL_NOLINK; 1513 } 1514 1515 std::vector<SDValue> Ops; 1516 Ops.push_back(Chain); 1517 Ops.push_back(Callee); 1518 1519 // Add argument registers to the end of the list so that they are known live 1520 // into the call. 1521 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1522 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1523 RegsToPass[i].second.getValueType())); 1524 1525 if (InFlag.getNode()) 1526 Ops.push_back(InFlag); 1527 1528 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1529 if (isTailCall) 1530 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1531 1532 // Returns a chain and a flag for retval copy to use. 
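// The glue value produced by the call node below ties the call, the
// CALLSEQ_END and the CopyFromReg nodes created in LowerCallResult
// together, so nothing can be scheduled between the call and the copies
// out of the physical return registers.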
1533 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1534 InFlag = Chain.getValue(1); 1535 1536 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1537 DAG.getIntPtrConstant(0, true), InFlag); 1538 if (!Ins.empty()) 1539 InFlag = Chain.getValue(1); 1540 1541 // Handle result values, copying them out of physregs into vregs that we 1542 // return. 1543 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1544 dl, DAG, InVals); 1545} 1546 1547/// HandleByVal - Every parameter *after* a byval parameter is passed 1548/// on the stack. Remember the next parameter register to allocate, 1549/// and then confiscate the rest of the parameter registers to insure 1550/// this. 1551void 1552llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const { 1553 unsigned reg = State->AllocateReg(GPRArgRegs, 4); 1554 assert((State->getCallOrPrologue() == Prologue || 1555 State->getCallOrPrologue() == Call) && 1556 "unhandled ParmContext"); 1557 if ((!State->isFirstByValRegValid()) && 1558 (ARM::R0 <= reg) && (reg <= ARM::R3)) { 1559 State->setFirstByValReg(reg); 1560 // At a call site, a byval parameter that is split between 1561 // registers and memory needs its size truncated here. In a 1562 // function prologue, such byval parameters are reassembled in 1563 // memory, and are not truncated. 1564 if (State->getCallOrPrologue() == Call) { 1565 unsigned excess = 4 * (ARM::R4 - reg); 1566 assert(size >= excess && "expected larger existing stack allocation"); 1567 size -= excess; 1568 } 1569 } 1570 // Confiscate any remaining parameter registers to preclude their 1571 // assignment to subsequent parameters. 1572 while (State->AllocateReg(GPRArgRegs, 4)) 1573 ; 1574} 1575 1576/// MatchingStackOffset - Return true if the given stack call argument is 1577/// already available in the same position (relatively) of the caller's 1578/// incoming argument stack. 1579static 1580bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1581 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1582 const ARMInstrInfo *TII) { 1583 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1584 int FI = INT_MAX; 1585 if (Arg.getOpcode() == ISD::CopyFromReg) { 1586 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1587 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1588 return false; 1589 MachineInstr *Def = MRI->getVRegDef(VR); 1590 if (!Def) 1591 return false; 1592 if (!Flags.isByVal()) { 1593 if (!TII->isLoadFromStackSlot(Def, FI)) 1594 return false; 1595 } else { 1596 return false; 1597 } 1598 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1599 if (Flags.isByVal()) 1600 // ByVal argument is passed in as a pointer but it's now being 1601 // dereferenced. e.g. 1602 // define @foo(%struct.X* %A) { 1603 // tail call @bar(%struct.X* byval %A) 1604 // } 1605 return false; 1606 SDValue Ptr = Ld->getBasePtr(); 1607 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1608 if (!FINode) 1609 return false; 1610 FI = FINode->getIndex(); 1611 } else 1612 return false; 1613 1614 assert(FI != INT_MAX); 1615 if (!MFI->isFixedObjectIndex(FI)) 1616 return false; 1617 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1618} 1619 1620/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1621/// for tail call optimization. Targets which want to do tail call 1622/// optimization should implement this function. 
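/// For ARM this currently only accepts "sibcall"-style calls: roughly, the
/// call must not be variadic with arguments, neither side may use sret,
/// the caller must not be Thumb1-only, the callee's return values must
/// land where the caller expects them, and any stack-passed arguments must
/// already sit at matching offsets in the caller's own incoming frame.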
1623bool 1624ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1625 CallingConv::ID CalleeCC, 1626 bool isVarArg, 1627 bool isCalleeStructRet, 1628 bool isCallerStructRet, 1629 const SmallVectorImpl<ISD::OutputArg> &Outs, 1630 const SmallVectorImpl<SDValue> &OutVals, 1631 const SmallVectorImpl<ISD::InputArg> &Ins, 1632 SelectionDAG& DAG) const { 1633 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1634 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1635 bool CCMatch = CallerCC == CalleeCC; 1636 1637 // Look for obvious safe cases to perform tail call optimization that do not 1638 // require ABI changes. This is what gcc calls sibcall. 1639 1640 // Do not sibcall optimize vararg calls unless the call site is not passing 1641 // any arguments. 1642 if (isVarArg && !Outs.empty()) 1643 return false; 1644 1645 // Also avoid sibcall optimization if either caller or callee uses struct 1646 // return semantics. 1647 if (isCalleeStructRet || isCallerStructRet) 1648 return false; 1649 1650 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1651 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1652 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1653 // support in the assembler and linker to be used. This would need to be 1654 // fixed to fully support tail calls in Thumb1. 1655 // 1656 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1657 // LR. This means if we need to reload LR, it takes an extra instructions, 1658 // which outweighs the value of the tail call; but here we don't know yet 1659 // whether LR is going to be used. Probably the right approach is to 1660 // generate the tail call here and turn it back into CALL/RET in 1661 // emitEpilogue if LR is used. 1662 1663 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1664 // but we need to make sure there are enough registers; the only valid 1665 // registers are the 4 used for parameters. We don't currently do this 1666 // case. 1667 if (Subtarget->isThumb1Only()) 1668 return false; 1669 1670 // If the calling conventions do not match, then we'd better make sure the 1671 // results are returned in the same way as what the caller expects. 1672 if (!CCMatch) { 1673 SmallVector<CCValAssign, 16> RVLocs1; 1674 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1675 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1676 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1677 1678 SmallVector<CCValAssign, 16> RVLocs2; 1679 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1680 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1681 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1682 1683 if (RVLocs1.size() != RVLocs2.size()) 1684 return false; 1685 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1686 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1687 return false; 1688 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1689 return false; 1690 if (RVLocs1[i].isRegLoc()) { 1691 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1692 return false; 1693 } else { 1694 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1695 return false; 1696 } 1697 } 1698 } 1699 1700 // If the callee takes no arguments then go on to check the results of the 1701 // call. 1702 if (!Outs.empty()) { 1703 // Check if stack adjustment is needed. 
For now, do not do this if any 1704 // argument is passed on the stack. 1705 SmallVector<CCValAssign, 16> ArgLocs; 1706 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1707 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1708 CCInfo.AnalyzeCallOperands(Outs, 1709 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1710 if (CCInfo.getNextStackOffset()) { 1711 MachineFunction &MF = DAG.getMachineFunction(); 1712 1713 // Check if the arguments are already laid out in the right way as 1714 // the caller's fixed stack objects. 1715 MachineFrameInfo *MFI = MF.getFrameInfo(); 1716 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1717 const ARMInstrInfo *TII = 1718 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1719 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1720 i != e; 1721 ++i, ++realArgIdx) { 1722 CCValAssign &VA = ArgLocs[i]; 1723 EVT RegVT = VA.getLocVT(); 1724 SDValue Arg = OutVals[realArgIdx]; 1725 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1726 if (VA.getLocInfo() == CCValAssign::Indirect) 1727 return false; 1728 if (VA.needsCustom()) { 1729 // f64 and vector types are split into multiple registers or 1730 // register/stack-slot combinations. The types will not match 1731 // the registers; give up on memory f64 refs until we figure 1732 // out what to do about this. 1733 if (!VA.isRegLoc()) 1734 return false; 1735 if (!ArgLocs[++i].isRegLoc()) 1736 return false; 1737 if (RegVT == MVT::v2f64) { 1738 if (!ArgLocs[++i].isRegLoc()) 1739 return false; 1740 if (!ArgLocs[++i].isRegLoc()) 1741 return false; 1742 } 1743 } else if (!VA.isRegLoc()) { 1744 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1745 MFI, MRI, TII)) 1746 return false; 1747 } 1748 } 1749 } 1750 } 1751 1752 return true; 1753} 1754 1755SDValue 1756ARMTargetLowering::LowerReturn(SDValue Chain, 1757 CallingConv::ID CallConv, bool isVarArg, 1758 const SmallVectorImpl<ISD::OutputArg> &Outs, 1759 const SmallVectorImpl<SDValue> &OutVals, 1760 DebugLoc dl, SelectionDAG &DAG) const { 1761 1762 // CCValAssign - represent the assignment of the return value to a location. 1763 SmallVector<CCValAssign, 16> RVLocs; 1764 1765 // CCState - Info about the registers and stack slots. 1766 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1767 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1768 1769 // Analyze outgoing return values. 1770 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1771 isVarArg)); 1772 1773 // If this is the first return lowered for this function, add 1774 // the regs to the liveout set for the function. 1775 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1776 for (unsigned i = 0; i != RVLocs.size(); ++i) 1777 if (RVLocs[i].isRegLoc()) 1778 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1779 } 1780 1781 SDValue Flag; 1782 1783 // Copy the result values into the output registers. 1784 for (unsigned i = 0, realRVLocIdx = 0; 1785 i != RVLocs.size(); 1786 ++i, ++realRVLocIdx) { 1787 CCValAssign &VA = RVLocs[i]; 1788 assert(VA.isRegLoc() && "Can only return in registers!"); 1789 1790 SDValue Arg = OutVals[realRVLocIdx]; 1791 1792 switch (VA.getLocInfo()) { 1793 default: llvm_unreachable("Unknown loc info!"); 1794 case CCValAssign::Full: break; 1795 case CCValAssign::BCvt: 1796 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1797 break; 1798 } 1799 1800 if (VA.needsCustom()) { 1801 if (VA.getLocVT() == MVT::v2f64) { 1802 // Extract the first half and return it in two registers. 
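// A v2f64 return value is therefore handed back as four i32 values: each
// f64 half goes through VMOVRRD and the resulting GPR pair is copied to
// consecutive return registers (typically R0-R3 under the soft-float ABI).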
1803 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1804 DAG.getConstant(0, MVT::i32)); 1805 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1806 DAG.getVTList(MVT::i32, MVT::i32), Half); 1807 1808 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1809 Flag = Chain.getValue(1); 1810 VA = RVLocs[++i]; // skip ahead to next loc 1811 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1812 HalfGPRs.getValue(1), Flag); 1813 Flag = Chain.getValue(1); 1814 VA = RVLocs[++i]; // skip ahead to next loc 1815 1816 // Extract the 2nd half and fall through to handle it as an f64 value. 1817 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1818 DAG.getConstant(1, MVT::i32)); 1819 } 1820 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1821 // available. 1822 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1823 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1824 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1825 Flag = Chain.getValue(1); 1826 VA = RVLocs[++i]; // skip ahead to next loc 1827 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1828 Flag); 1829 } else 1830 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1831 1832 // Guarantee that all emitted copies are 1833 // stuck together, avoiding something bad. 1834 Flag = Chain.getValue(1); 1835 } 1836 1837 SDValue result; 1838 if (Flag.getNode()) 1839 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1840 else // Return Void 1841 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1842 1843 return result; 1844} 1845 1846bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1847 if (N->getNumValues() != 1) 1848 return false; 1849 if (!N->hasNUsesOfValue(1, 0)) 1850 return false; 1851 1852 unsigned NumCopies = 0; 1853 SDNode* Copies[2]; 1854 SDNode *Use = *N->use_begin(); 1855 if (Use->getOpcode() == ISD::CopyToReg) { 1856 Copies[NumCopies++] = Use; 1857 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1858 // f64 returned in a pair of GPRs. 1859 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1860 UI != UE; ++UI) { 1861 if (UI->getOpcode() != ISD::CopyToReg) 1862 return false; 1863 Copies[UI.getUse().getResNo()] = *UI; 1864 ++NumCopies; 1865 } 1866 } else if (Use->getOpcode() == ISD::BITCAST) { 1867 // f32 returned in a single GPR. 
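// In that case the value reaches the return through a BITCAST to i32, so
// look through the bitcast to the single CopyToReg that feeds the return.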
1868 if (!Use->hasNUsesOfValue(1, 0)) 1869 return false; 1870 Use = *Use->use_begin(); 1871 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1872 return false; 1873 Copies[NumCopies++] = Use; 1874 } else { 1875 return false; 1876 } 1877 1878 if (NumCopies != 1 && NumCopies != 2) 1879 return false; 1880 1881 bool HasRet = false; 1882 for (unsigned i = 0; i < NumCopies; ++i) { 1883 SDNode *Copy = Copies[i]; 1884 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1885 UI != UE; ++UI) { 1886 if (UI->getOpcode() == ISD::CopyToReg) { 1887 SDNode *Use = *UI; 1888 if (Use == Copies[0] || Use == Copies[1]) 1889 continue; 1890 return false; 1891 } 1892 if (UI->getOpcode() != ARMISD::RET_FLAG) 1893 return false; 1894 HasRet = true; 1895 } 1896 } 1897 1898 return HasRet; 1899} 1900 1901bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1902 if (!EnableARMTailCalls) 1903 return false; 1904 1905 if (!CI->isTailCall()) 1906 return false; 1907 1908 return !Subtarget->isThumb1Only(); 1909} 1910 1911// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1912// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1913// one of the above mentioned nodes. It has to be wrapped because otherwise 1914// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1915// be used to form addressing mode. These wrapped nodes will be selected 1916// into MOVi. 1917static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1918 EVT PtrVT = Op.getValueType(); 1919 // FIXME there is no actual debug info here 1920 DebugLoc dl = Op.getDebugLoc(); 1921 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1922 SDValue Res; 1923 if (CP->isMachineConstantPoolEntry()) 1924 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1925 CP->getAlignment()); 1926 else 1927 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1928 CP->getAlignment()); 1929 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1930} 1931 1932unsigned ARMTargetLowering::getJumpTableEncoding() const { 1933 return MachineJumpTableInfo::EK_Inline; 1934} 1935 1936SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1937 SelectionDAG &DAG) const { 1938 MachineFunction &MF = DAG.getMachineFunction(); 1939 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1940 unsigned ARMPCLabelIndex = 0; 1941 DebugLoc DL = Op.getDebugLoc(); 1942 EVT PtrVT = getPointerTy(); 1943 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1944 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1945 SDValue CPAddr; 1946 if (RelocM == Reloc::Static) { 1947 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1948 } else { 1949 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1950 ARMPCLabelIndex = AFI->createPICLabelUId(); 1951 ARMConstantPoolValue *CPV = 1952 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 1953 ARMCP::CPBlockAddress, PCAdj); 1954 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1955 } 1956 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1957 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1958 MachinePointerInfo::getConstantPool(), 1959 false, false, 0); 1960 if (RelocM == Reloc::Static) 1961 return Result; 1962 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1963 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1964} 1965 1966// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1967SDValue 1968ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1969 SelectionDAG &DAG) const { 1970 DebugLoc dl = GA->getDebugLoc(); 1971 EVT PtrVT = getPointerTy(); 1972 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1973 MachineFunction &MF = DAG.getMachineFunction(); 1974 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1975 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1976 ARMConstantPoolValue *CPV = 1977 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 1978 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1979 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1980 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1981 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1982 MachinePointerInfo::getConstantPool(), 1983 false, false, 0); 1984 SDValue Chain = Argument.getValue(1); 1985 1986 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1987 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1988 1989 // call __tls_get_addr. 1990 ArgListTy Args; 1991 ArgListEntry Entry; 1992 Entry.Node = Argument; 1993 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 1994 Args.push_back(Entry); 1995 // FIXME: is there useful debug info available here? 1996 std::pair<SDValue, SDValue> CallResult = 1997 LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()), 1998 false, false, false, false, 1999 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 2000 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2001 return CallResult.first; 2002} 2003 2004// Lower ISD::GlobalTLSAddress using the "initial exec" or 2005// "local exec" model. 2006SDValue 2007ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2008 SelectionDAG &DAG) const { 2009 const GlobalValue *GV = GA->getGlobal(); 2010 DebugLoc dl = GA->getDebugLoc(); 2011 SDValue Offset; 2012 SDValue Chain = DAG.getEntryNode(); 2013 EVT PtrVT = getPointerTy(); 2014 // Get the Thread Pointer 2015 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2016 2017 if (GV->isDeclaration()) { 2018 MachineFunction &MF = DAG.getMachineFunction(); 2019 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2020 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2021 // Initial exec model. 2022 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2023 ARMConstantPoolValue *CPV = 2024 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2025 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2026 true); 2027 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2028 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2029 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2030 MachinePointerInfo::getConstantPool(), 2031 false, false, 0); 2032 Chain = Offset.getValue(1); 2033 2034 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2035 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2036 2037 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2038 MachinePointerInfo::getConstantPool(), 2039 false, false, 0); 2040 } else { 2041 // local exec model 2042 ARMConstantPoolValue *CPV = 2043 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2044 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2045 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2046 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2047 MachinePointerInfo::getConstantPool(), 2048 false, false, 0); 2049 } 2050 2051 // The address of the thread local variable is the add of the thread 2052 // pointer with the offset of the variable. 2053 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2054} 2055 2056SDValue 2057ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2058 // TODO: implement the "local dynamic" model 2059 assert(Subtarget->isTargetELF() && 2060 "TLS not implemented for non-ELF targets"); 2061 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2062 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2063 // otherwise use the "Local Exec" TLS Model 2064 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2065 return LowerToTLSGeneralDynamicModel(GA, DAG); 2066 else 2067 return LowerToTLSExecModels(GA, DAG); 2068} 2069 2070SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2071 SelectionDAG &DAG) const { 2072 EVT PtrVT = getPointerTy(); 2073 DebugLoc dl = Op.getDebugLoc(); 2074 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2075 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2076 if (RelocM == Reloc::PIC_) { 2077 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2078 ARMConstantPoolValue *CPV = 2079 ARMConstantPoolConstant::Create(GV, 2080 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2081 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2082 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2083 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2084 CPAddr, 2085 MachinePointerInfo::getConstantPool(), 2086 false, false, 0); 2087 SDValue Chain = Result.getValue(1); 2088 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2089 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2090 if (!UseGOTOFF) 2091 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2092 MachinePointerInfo::getGOT(), false, false, 0); 2093 return Result; 2094 } 2095 2096 // If we have T2 ops, we can materialize the address directly via movt/movw 2097 // pair. This is always cheaper. 2098 if (Subtarget->useMovt()) { 2099 ++NumMovwMovt; 2100 // FIXME: Once remat is capable of dealing with instructions with register 2101 // operands, expand this into two nodes. 
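// The wrapped TargetGlobalAddress below is eventually selected into a
// movw/movt pair, roughly:
//   movw r0, :lower16:symbol
//   movt r0, :upper16:symbol
// which avoids a constant-pool load entirely.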
2102 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2103 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2104 } else { 2105 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2106 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2107 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2108 MachinePointerInfo::getConstantPool(), 2109 false, false, 0); 2110 } 2111} 2112 2113SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2114 SelectionDAG &DAG) const { 2115 EVT PtrVT = getPointerTy(); 2116 DebugLoc dl = Op.getDebugLoc(); 2117 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2118 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2119 MachineFunction &MF = DAG.getMachineFunction(); 2120 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2121 2122 // FIXME: Enable this for static codegen when tool issues are fixed. 2123 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2124 ++NumMovwMovt; 2125 // FIXME: Once remat is capable of dealing with instructions with register 2126 // operands, expand this into two nodes. 2127 if (RelocM == Reloc::Static) 2128 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2129 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2130 2131 unsigned Wrapper = (RelocM == Reloc::PIC_) 2132 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2133 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2134 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2135 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2136 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2137 MachinePointerInfo::getGOT(), false, false, 0); 2138 return Result; 2139 } 2140 2141 unsigned ARMPCLabelIndex = 0; 2142 SDValue CPAddr; 2143 if (RelocM == Reloc::Static) { 2144 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2145 } else { 2146 ARMPCLabelIndex = AFI->createPICLabelUId(); 2147 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2148 ARMConstantPoolValue *CPV = 2149 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2150 PCAdj); 2151 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2152 } 2153 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2154 2155 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2156 MachinePointerInfo::getConstantPool(), 2157 false, false, 0); 2158 SDValue Chain = Result.getValue(1); 2159 2160 if (RelocM == Reloc::PIC_) { 2161 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2162 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2163 } 2164 2165 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2166 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2167 false, false, 0); 2168 2169 return Result; 2170} 2171 2172SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2173 SelectionDAG &DAG) const { 2174 assert(Subtarget->isTargetELF() && 2175 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2176 MachineFunction &MF = DAG.getMachineFunction(); 2177 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2178 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2179 EVT PtrVT = getPointerTy(); 2180 DebugLoc dl = Op.getDebugLoc(); 2181 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2182 ARMConstantPoolValue *CPV = 2183 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2184 ARMPCLabelIndex, PCAdj); 2185 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2186 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2187 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2188 MachinePointerInfo::getConstantPool(), 2189 false, false, 0); 2190 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2191 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2192} 2193 2194SDValue 2195ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2196 const { 2197 DebugLoc dl = Op.getDebugLoc(); 2198 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2199 Op.getOperand(0), Op.getOperand(1)); 2200} 2201 2202SDValue 2203ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2204 DebugLoc dl = Op.getDebugLoc(); 2205 SDValue Val = DAG.getConstant(0, MVT::i32); 2206 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2207 Op.getOperand(1), Val); 2208} 2209 2210SDValue 2211ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2212 DebugLoc dl = Op.getDebugLoc(); 2213 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2214 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2215} 2216 2217SDValue 2218ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2219 const ARMSubtarget *Subtarget) const { 2220 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2221 DebugLoc dl = Op.getDebugLoc(); 2222 switch (IntNo) { 2223 default: return SDValue(); // Don't custom lower most intrinsics. 2224 case Intrinsic::arm_thread_pointer: { 2225 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2226 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2227 } 2228 case Intrinsic::eh_sjlj_lsda: { 2229 MachineFunction &MF = DAG.getMachineFunction(); 2230 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2231 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2232 EVT PtrVT = getPointerTy(); 2233 DebugLoc dl = Op.getDebugLoc(); 2234 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2235 SDValue CPAddr; 2236 unsigned PCAdj = (RelocM != Reloc::PIC_) 2237 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2238 ARMConstantPoolValue *CPV = 2239 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2240 ARMCP::CPLSDA, PCAdj); 2241 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2242 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2243 SDValue Result = 2244 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2245 MachinePointerInfo::getConstantPool(), 2246 false, false, 0); 2247 2248 if (RelocM == Reloc::PIC_) { 2249 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2250 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2251 } 2252 return Result; 2253 } 2254 case Intrinsic::arm_neon_vmulls: 2255 case Intrinsic::arm_neon_vmullu: { 2256 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2257 ? 
ARMISD::VMULLs : ARMISD::VMULLu; 2258 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2259 Op.getOperand(1), Op.getOperand(2)); 2260 } 2261 } 2262} 2263 2264static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2265 const ARMSubtarget *Subtarget) { 2266 DebugLoc dl = Op.getDebugLoc(); 2267 if (!Subtarget->hasDataBarrier()) { 2268 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2269 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2270 // here. 2271 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2272 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2273 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2274 DAG.getConstant(0, MVT::i32)); 2275 } 2276 2277 SDValue Op5 = Op.getOperand(5); 2278 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2279 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2280 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2281 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2282 2283 ARM_MB::MemBOpt DMBOpt; 2284 if (isDeviceBarrier) 2285 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2286 else 2287 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2288 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2289 DAG.getConstant(DMBOpt, MVT::i32)); 2290} 2291 2292 2293static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2294 const ARMSubtarget *Subtarget) { 2295 // FIXME: handle "fence singlethread" more efficiently. 2296 DebugLoc dl = Op.getDebugLoc(); 2297 if (!Subtarget->hasDataBarrier()) { 2298 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2299 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2300 // here. 2301 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2302 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2303 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2304 DAG.getConstant(0, MVT::i32)); 2305 } 2306 2307 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2308 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2309} 2310 2311static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2312 const ARMSubtarget *Subtarget) { 2313 // ARM pre v5TE and Thumb1 does not have preload instructions. 2314 if (!(Subtarget->isThumb2() || 2315 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2316 // Just preserve the chain. 2317 return Op.getOperand(0); 2318 2319 DebugLoc dl = Op.getDebugLoc(); 2320 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2321 if (!isRead && 2322 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2323 // ARMv7 with MP extension has PLDW. 2324 return Op.getOperand(0); 2325 2326 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2327 if (Subtarget->isThumb()) { 2328 // Invert the bits. 2329 isRead = ~isRead & 1; 2330 isData = ~isData & 1; 2331 } 2332 2333 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2334 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2335 DAG.getConstant(isData, MVT::i32)); 2336} 2337 2338static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2339 MachineFunction &MF = DAG.getMachineFunction(); 2340 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2341 2342 // vastart just stores the address of the VarArgsFrameIndex slot into the 2343 // memory location argument. 
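// On ARM the va_list is effectively a single pointer, so va_start lowers
// to one word-sized store: the address of the VarArgsFrameIndex object
// (the start of the register-save/overflow area) is written to the
// va_list slot passed as the operand.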
2344 DebugLoc dl = Op.getDebugLoc();
2345 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2346 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2347 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2348 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2349 MachinePointerInfo(SV), false, false, 0);
2350 }
2351
2352 SDValue
2353 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2354 SDValue &Root, SelectionDAG &DAG,
2355 DebugLoc dl) const {
2356 MachineFunction &MF = DAG.getMachineFunction();
2357 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2358
2359 TargetRegisterClass *RC;
2360 if (AFI->isThumb1OnlyFunction())
2361 RC = ARM::tGPRRegisterClass;
2362 else
2363 RC = ARM::GPRRegisterClass;
2364
2365 // Transform the arguments stored in physical registers into virtual ones.
2366 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2367 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2368
2369 SDValue ArgValue2;
2370 if (NextVA.isMemLoc()) {
2371 MachineFrameInfo *MFI = MF.getFrameInfo();
2372 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2373
2374 // Create load node to retrieve arguments from the stack.
2375 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2376 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
2377 MachinePointerInfo::getFixedStack(FI),
2378 false, false, 0);
2379 } else {
2380 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2381 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2382 }
2383
2384 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
2385 }
2386
2387 void
2388 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
2389 unsigned &VARegSize, unsigned &VARegSaveSize)
2390 const {
2391 unsigned NumGPRs;
2392 if (CCInfo.isFirstByValRegValid())
2393 NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
2394 else {
2395 unsigned int firstUnalloced;
2396 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
2397 sizeof(GPRArgRegs) /
2398 sizeof(GPRArgRegs[0]));
2399 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
2400 }
2401
2402 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2403 VARegSize = NumGPRs * 4;
2404 VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
2405 }
2406
2407 // The remaining GPRs hold either the beginning of variable-argument
2408 // data, or the beginning of an aggregate passed by value (usually
2409 // byval). Either way, we allocate stack slots adjacent to the data
2410 // provided by our caller, and store the unallocated registers there.
2411 // If this is a variadic function, the va_list pointer will begin with
2412 // these values; otherwise, this reassembles a (byval) structure that
2413 // was split between registers and memory.
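// For example, in a variadic function that is entered with two named
// arguments in r0-r1, the remaining r2-r3 are stored into a fixed stack
// object adjacent to the arguments the caller already pushed, so va_arg
// can walk r2, r3 and then the caller's stack area as one contiguous
// block.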
2414void 2415ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2416 DebugLoc dl, SDValue &Chain, 2417 unsigned ArgOffset) const { 2418 MachineFunction &MF = DAG.getMachineFunction(); 2419 MachineFrameInfo *MFI = MF.getFrameInfo(); 2420 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2421 unsigned firstRegToSaveIndex; 2422 if (CCInfo.isFirstByValRegValid()) 2423 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2424 else { 2425 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2426 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2427 } 2428 2429 unsigned VARegSize, VARegSaveSize; 2430 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2431 if (VARegSaveSize) { 2432 // If this function is vararg, store any remaining integer argument regs 2433 // to their spots on the stack so that they may be loaded by deferencing 2434 // the result of va_next. 2435 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2436 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2437 ArgOffset + VARegSaveSize 2438 - VARegSize, 2439 false)); 2440 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2441 getPointerTy()); 2442 2443 SmallVector<SDValue, 4> MemOps; 2444 for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { 2445 TargetRegisterClass *RC; 2446 if (AFI->isThumb1OnlyFunction()) 2447 RC = ARM::tGPRRegisterClass; 2448 else 2449 RC = ARM::GPRRegisterClass; 2450 2451 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2452 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2453 SDValue Store = 2454 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2455 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2456 false, false, 0); 2457 MemOps.push_back(Store); 2458 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2459 DAG.getConstant(4, getPointerTy())); 2460 } 2461 if (!MemOps.empty()) 2462 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2463 &MemOps[0], MemOps.size()); 2464 } else 2465 // This will point to the next argument passed via stack. 2466 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2467} 2468 2469SDValue 2470ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2471 CallingConv::ID CallConv, bool isVarArg, 2472 const SmallVectorImpl<ISD::InputArg> 2473 &Ins, 2474 DebugLoc dl, SelectionDAG &DAG, 2475 SmallVectorImpl<SDValue> &InVals) 2476 const { 2477 MachineFunction &MF = DAG.getMachineFunction(); 2478 MachineFrameInfo *MFI = MF.getFrameInfo(); 2479 2480 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2481 2482 // Assign locations to all of the incoming arguments. 2483 SmallVector<CCValAssign, 16> ArgLocs; 2484 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2485 getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue); 2486 CCInfo.AnalyzeFormalArguments(Ins, 2487 CCAssignFnForNode(CallConv, /* Return*/ false, 2488 isVarArg)); 2489 2490 SmallVector<SDValue, 16> ArgValues; 2491 int lastInsIndex = -1; 2492 2493 SDValue ArgValue; 2494 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2495 CCValAssign &VA = ArgLocs[i]; 2496 2497 // Arguments stored in registers. 2498 if (VA.isRegLoc()) { 2499 EVT RegVT = VA.getLocVT(); 2500 2501 if (VA.needsCustom()) { 2502 // f64 and vector types are split up into multiple registers or 2503 // combinations of registers and stack slots. 
2504 if (VA.getLocVT() == MVT::v2f64) { 2505 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2506 Chain, DAG, dl); 2507 VA = ArgLocs[++i]; // skip ahead to next loc 2508 SDValue ArgValue2; 2509 if (VA.isMemLoc()) { 2510 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2511 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2512 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2513 MachinePointerInfo::getFixedStack(FI), 2514 false, false, 0); 2515 } else { 2516 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2517 Chain, DAG, dl); 2518 } 2519 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2520 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2521 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2522 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2523 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2524 } else 2525 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2526 2527 } else { 2528 TargetRegisterClass *RC; 2529 2530 if (RegVT == MVT::f32) 2531 RC = ARM::SPRRegisterClass; 2532 else if (RegVT == MVT::f64) 2533 RC = ARM::DPRRegisterClass; 2534 else if (RegVT == MVT::v2f64) 2535 RC = ARM::QPRRegisterClass; 2536 else if (RegVT == MVT::i32) 2537 RC = (AFI->isThumb1OnlyFunction() ? 2538 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2539 else 2540 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2541 2542 // Transform the arguments in physical registers into virtual ones. 2543 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2544 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2545 } 2546 2547 // If this is an 8 or 16-bit value, it is really passed promoted 2548 // to 32 bits. Insert an assert[sz]ext to capture this, then 2549 // truncate to the right size. 2550 switch (VA.getLocInfo()) { 2551 default: llvm_unreachable("Unknown loc info!"); 2552 case CCValAssign::Full: break; 2553 case CCValAssign::BCvt: 2554 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2555 break; 2556 case CCValAssign::SExt: 2557 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2558 DAG.getValueType(VA.getValVT())); 2559 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2560 break; 2561 case CCValAssign::ZExt: 2562 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2563 DAG.getValueType(VA.getValVT())); 2564 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2565 break; 2566 } 2567 2568 InVals.push_back(ArgValue); 2569 2570 } else { // VA.isRegLoc() 2571 2572 // sanity check 2573 assert(VA.isMemLoc()); 2574 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2575 2576 int index = ArgLocs[i].getValNo(); 2577 2578 // Some Ins[] entries become multiple ArgLoc[] entries. 2579 // Process them only once. 2580 if (index != lastInsIndex) 2581 { 2582 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2583 // FIXME: For now, all byval parameter objects are marked mutable. 2584 // This can be changed with more analysis. 2585 // In case of tail call optimization mark all arguments mutable. 2586 // Since they could be overwritten by lowering of arguments in case of 2587 // a tail call. 2588 if (Flags.isByVal()) { 2589 unsigned VARegSize, VARegSaveSize; 2590 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2591 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2592 unsigned Bytes = Flags.getByValSize() - VARegSize; 2593 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2594 int FI = MFI->CreateFixedObject(Bytes, 2595 VA.getLocMemOffset(), false); 2596 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2597 } else { 2598 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2599 VA.getLocMemOffset(), true); 2600 2601 // Create load nodes to retrieve arguments from the stack. 2602 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2603 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2604 MachinePointerInfo::getFixedStack(FI), 2605 false, false, 0)); 2606 } 2607 lastInsIndex = index; 2608 } 2609 } 2610 } 2611 2612 // varargs 2613 if (isVarArg) 2614 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2615 2616 return Chain; 2617} 2618 2619/// isFloatingPointZero - Return true if this is +0.0. 2620static bool isFloatingPointZero(SDValue Op) { 2621 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2622 return CFP->getValueAPF().isPosZero(); 2623 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2624 // Maybe this has already been legalized into the constant pool? 2625 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2626 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2627 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2628 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2629 return CFP->getValueAPF().isPosZero(); 2630 } 2631 } 2632 return false; 2633} 2634 2635/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2636/// the given operands. 2637SDValue 2638ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2639 SDValue &ARMcc, SelectionDAG &DAG, 2640 DebugLoc dl) const { 2641 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2642 unsigned C = RHSC->getZExtValue(); 2643 if (!isLegalICmpImmediate(C)) { 2644 // Constant does not fit, try adjusting it by one? 2645 switch (CC) { 2646 default: break; 2647 case ISD::SETLT: 2648 case ISD::SETGE: 2649 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2650 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2651 RHS = DAG.getConstant(C-1, MVT::i32); 2652 } 2653 break; 2654 case ISD::SETULT: 2655 case ISD::SETUGE: 2656 if (C != 0 && isLegalICmpImmediate(C-1)) { 2657 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2658 RHS = DAG.getConstant(C-1, MVT::i32); 2659 } 2660 break; 2661 case ISD::SETLE: 2662 case ISD::SETGT: 2663 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2664 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2665 RHS = DAG.getConstant(C+1, MVT::i32); 2666 } 2667 break; 2668 case ISD::SETULE: 2669 case ISD::SETUGT: 2670 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2671 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2672 RHS = DAG.getConstant(C+1, MVT::i32); 2673 } 2674 break; 2675 } 2676 } 2677 } 2678 2679 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2680 ARMISD::NodeType CompareType; 2681 switch (CondCode) { 2682 default: 2683 CompareType = ARMISD::CMP; 2684 break; 2685 case ARMCC::EQ: 2686 case ARMCC::NE: 2687 // Uses only Z Flag 2688 CompareType = ARMISD::CMPZ; 2689 break; 2690 } 2691 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2692 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2693} 2694 2695/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
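/// Comparing against +0.0 uses the cheaper compare-with-zero form
/// (CMPFPw0); in both cases the FP status flags are then transferred to
/// CPSR via FMSTAT (VMRS) so that ordinary predicated instructions and
/// CMOVs can consume the result.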
2696SDValue 2697ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2698 DebugLoc dl) const { 2699 SDValue Cmp; 2700 if (!isFloatingPointZero(RHS)) 2701 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2702 else 2703 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2704 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2705} 2706 2707/// duplicateCmp - Glue values can have only one use, so this function 2708/// duplicates a comparison node. 2709SDValue 2710ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2711 unsigned Opc = Cmp.getOpcode(); 2712 DebugLoc DL = Cmp.getDebugLoc(); 2713 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2714 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2715 2716 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2717 Cmp = Cmp.getOperand(0); 2718 Opc = Cmp.getOpcode(); 2719 if (Opc == ARMISD::CMPFP) 2720 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2721 else { 2722 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2723 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2724 } 2725 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2726} 2727 2728SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2729 SDValue Cond = Op.getOperand(0); 2730 SDValue SelectTrue = Op.getOperand(1); 2731 SDValue SelectFalse = Op.getOperand(2); 2732 DebugLoc dl = Op.getDebugLoc(); 2733 2734 // Convert: 2735 // 2736 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2737 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2738 // 2739 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2740 const ConstantSDNode *CMOVTrue = 2741 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2742 const ConstantSDNode *CMOVFalse = 2743 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2744 2745 if (CMOVTrue && CMOVFalse) { 2746 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2747 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2748 2749 SDValue True; 2750 SDValue False; 2751 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2752 True = SelectTrue; 2753 False = SelectFalse; 2754 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2755 True = SelectFalse; 2756 False = SelectTrue; 2757 } 2758 2759 if (True.getNode() && False.getNode()) { 2760 EVT VT = Op.getValueType(); 2761 SDValue ARMcc = Cond.getOperand(2); 2762 SDValue CCR = Cond.getOperand(3); 2763 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2764 assert(True.getValueType() == VT); 2765 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2766 } 2767 } 2768 } 2769 2770 return DAG.getSelectCC(dl, Cond, 2771 DAG.getConstant(0, Cond.getValueType()), 2772 SelectTrue, SelectFalse, ISD::SETNE); 2773} 2774 2775SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2776 EVT VT = Op.getValueType(); 2777 SDValue LHS = Op.getOperand(0); 2778 SDValue RHS = Op.getOperand(1); 2779 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2780 SDValue TrueVal = Op.getOperand(2); 2781 SDValue FalseVal = Op.getOperand(3); 2782 DebugLoc dl = Op.getDebugLoc(); 2783 2784 if (LHS.getValueType() == MVT::i32) { 2785 SDValue ARMcc; 2786 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2787 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2788 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2789 } 2790 2791 ARMCC::CondCodes CondCode, CondCode2; 2792 FPCCToARMCC(CC, 
CondCode, CondCode2); 2793 2794 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2795 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2796 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2797 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2798 ARMcc, CCR, Cmp); 2799 if (CondCode2 != ARMCC::AL) { 2800 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2801 // FIXME: Needs another CMP because flag can have but one use. 2802 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2803 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2804 Result, TrueVal, ARMcc2, CCR, Cmp2); 2805 } 2806 return Result; 2807} 2808 2809/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2810/// to morph to an integer compare sequence. 2811static bool canChangeToInt(SDValue Op, bool &SeenZero, 2812 const ARMSubtarget *Subtarget) { 2813 SDNode *N = Op.getNode(); 2814 if (!N->hasOneUse()) 2815 // Otherwise it requires moving the value from fp to integer registers. 2816 return false; 2817 if (!N->getNumValues()) 2818 return false; 2819 EVT VT = Op.getValueType(); 2820 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2821 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2822 // vmrs are very slow, e.g. cortex-a8. 2823 return false; 2824 2825 if (isFloatingPointZero(Op)) { 2826 SeenZero = true; 2827 return true; 2828 } 2829 return ISD::isNormalLoad(N); 2830} 2831 2832static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2833 if (isFloatingPointZero(Op)) 2834 return DAG.getConstant(0, MVT::i32); 2835 2836 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2837 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2838 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2839 Ld->isVolatile(), Ld->isNonTemporal(), 2840 Ld->getAlignment()); 2841 2842 llvm_unreachable("Unknown VFP cmp argument!"); 2843} 2844 2845static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2846 SDValue &RetVal1, SDValue &RetVal2) { 2847 if (isFloatingPointZero(Op)) { 2848 RetVal1 = DAG.getConstant(0, MVT::i32); 2849 RetVal2 = DAG.getConstant(0, MVT::i32); 2850 return; 2851 } 2852 2853 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2854 SDValue Ptr = Ld->getBasePtr(); 2855 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2856 Ld->getChain(), Ptr, 2857 Ld->getPointerInfo(), 2858 Ld->isVolatile(), Ld->isNonTemporal(), 2859 Ld->getAlignment()); 2860 2861 EVT PtrType = Ptr.getValueType(); 2862 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2863 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2864 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2865 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2866 Ld->getChain(), NewPtr, 2867 Ld->getPointerInfo().getWithOffset(4), 2868 Ld->isVolatile(), Ld->isNonTemporal(), 2869 NewAlign); 2870 return; 2871 } 2872 2873 llvm_unreachable("Unknown VFP cmp argument!"); 2874} 2875 2876/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2877/// f32 and even f64 comparisons to integer ones. 
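// Illustrative example (assumes unsafe FP math is enabled): an f32 equality test
// "a == b" where both operands are plain loads can be re-expressed as an integer CMP
// of the two loaded words, avoiding the vcmp/vmrs sequence entirely; for f64 the code
// below compares both 32-bit halves with the BCC_i64 node instead.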
2878SDValue 2879ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2880 SDValue Chain = Op.getOperand(0); 2881 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2882 SDValue LHS = Op.getOperand(2); 2883 SDValue RHS = Op.getOperand(3); 2884 SDValue Dest = Op.getOperand(4); 2885 DebugLoc dl = Op.getDebugLoc(); 2886 2887 bool SeenZero = false; 2888 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2889 canChangeToInt(RHS, SeenZero, Subtarget) && 2890 // If one of the operand is zero, it's safe to ignore the NaN case since 2891 // we only care about equality comparisons. 2892 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2893 // If unsafe fp math optimization is enabled and there are no other uses of 2894 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2895 // to an integer comparison. 2896 if (CC == ISD::SETOEQ) 2897 CC = ISD::SETEQ; 2898 else if (CC == ISD::SETUNE) 2899 CC = ISD::SETNE; 2900 2901 SDValue ARMcc; 2902 if (LHS.getValueType() == MVT::f32) { 2903 LHS = bitcastf32Toi32(LHS, DAG); 2904 RHS = bitcastf32Toi32(RHS, DAG); 2905 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2906 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2907 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2908 Chain, Dest, ARMcc, CCR, Cmp); 2909 } 2910 2911 SDValue LHS1, LHS2; 2912 SDValue RHS1, RHS2; 2913 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2914 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2915 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2916 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2917 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2918 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2919 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2920 } 2921 2922 return SDValue(); 2923} 2924 2925SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2926 SDValue Chain = Op.getOperand(0); 2927 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2928 SDValue LHS = Op.getOperand(2); 2929 SDValue RHS = Op.getOperand(3); 2930 SDValue Dest = Op.getOperand(4); 2931 DebugLoc dl = Op.getDebugLoc(); 2932 2933 if (LHS.getValueType() == MVT::i32) { 2934 SDValue ARMcc; 2935 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2936 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2937 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2938 Chain, Dest, ARMcc, CCR, Cmp); 2939 } 2940 2941 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2942 2943 if (UnsafeFPMath && 2944 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2945 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2946 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2947 if (Result.getNode()) 2948 return Result; 2949 } 2950 2951 ARMCC::CondCodes CondCode, CondCode2; 2952 FPCCToARMCC(CC, CondCode, CondCode2); 2953 2954 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2955 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2956 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2957 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2958 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2959 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2960 if (CondCode2 != ARMCC::AL) { 2961 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2962 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2963 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2964 } 2965 return Res; 2966} 2967 2968SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2969 SDValue 
Chain = Op.getOperand(0); 2970 SDValue Table = Op.getOperand(1); 2971 SDValue Index = Op.getOperand(2); 2972 DebugLoc dl = Op.getDebugLoc(); 2973 2974 EVT PTy = getPointerTy(); 2975 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2976 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2977 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2978 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2979 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2980 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2981 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2982 if (Subtarget->isThumb2()) { 2983 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2984 // which does another jump to the destination. This also makes it easier 2985 // to translate it to TBB / TBH later. 2986 // FIXME: This might not work if the function is extremely large. 2987 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2988 Addr, Op.getOperand(2), JTI, UId); 2989 } 2990 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2991 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2992 MachinePointerInfo::getJumpTable(), 2993 false, false, 0); 2994 Chain = Addr.getValue(1); 2995 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2996 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2997 } else { 2998 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2999 MachinePointerInfo::getJumpTable(), false, false, 0); 3000 Chain = Addr.getValue(1); 3001 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3002 } 3003} 3004 3005static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3006 DebugLoc dl = Op.getDebugLoc(); 3007 unsigned Opc; 3008 3009 switch (Op.getOpcode()) { 3010 default: 3011 assert(0 && "Invalid opcode!"); 3012 case ISD::FP_TO_SINT: 3013 Opc = ARMISD::FTOSI; 3014 break; 3015 case ISD::FP_TO_UINT: 3016 Opc = ARMISD::FTOUI; 3017 break; 3018 } 3019 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3020 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3021} 3022 3023static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3024 EVT VT = Op.getValueType(); 3025 DebugLoc dl = Op.getDebugLoc(); 3026 3027 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3028 "Invalid type for custom lowering!"); 3029 if (VT != MVT::v4f32) 3030 return DAG.UnrollVectorOp(Op.getNode()); 3031 3032 unsigned CastOpc; 3033 unsigned Opc; 3034 switch (Op.getOpcode()) { 3035 default: 3036 assert(0 && "Invalid opcode!"); 3037 case ISD::SINT_TO_FP: 3038 CastOpc = ISD::SIGN_EXTEND; 3039 Opc = ISD::SINT_TO_FP; 3040 break; 3041 case ISD::UINT_TO_FP: 3042 CastOpc = ISD::ZERO_EXTEND; 3043 Opc = ISD::UINT_TO_FP; 3044 break; 3045 } 3046 3047 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3048 return DAG.getNode(Opc, dl, VT, Op); 3049} 3050 3051static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3052 EVT VT = Op.getValueType(); 3053 if (VT.isVector()) 3054 return LowerVectorINT_TO_FP(Op, DAG); 3055 3056 DebugLoc dl = Op.getDebugLoc(); 3057 unsigned Opc; 3058 3059 switch (Op.getOpcode()) { 3060 default: 3061 assert(0 && "Invalid opcode!"); 3062 case ISD::SINT_TO_FP: 3063 Opc = ARMISD::SITOF; 3064 break; 3065 case ISD::UINT_TO_FP: 3066 Opc = ARMISD::UITOF; 3067 break; 3068 } 3069 3070 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3071 return DAG.getNode(Opc, dl, VT, Op); 3072} 3073 3074SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3075 // Implement fcopysign with a fabs and a conditional fneg. 3076 SDValue Tmp0 = Op.getOperand(0); 3077 SDValue Tmp1 = Op.getOperand(1); 3078 DebugLoc dl = Op.getDebugLoc(); 3079 EVT VT = Op.getValueType(); 3080 EVT SrcVT = Tmp1.getValueType(); 3081 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3082 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3083 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3084 3085 if (UseNEON) { 3086 // Use VBSL to copy the sign bit. 3087 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3088 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3089 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3090 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3091 if (VT == MVT::f64) 3092 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3093 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3094 DAG.getConstant(32, MVT::i32)); 3095 else /*if (VT == MVT::f32)*/ 3096 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3097 if (SrcVT == MVT::f32) { 3098 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3099 if (VT == MVT::f64) 3100 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3101 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3102 DAG.getConstant(32, MVT::i32)); 3103 } else if (VT == MVT::f32) 3104 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3105 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3106 DAG.getConstant(32, MVT::i32)); 3107 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3108 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3109 3110 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3111 MVT::i32); 3112 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3113 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3114 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3115 3116 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3117 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3118 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3119 if (VT == MVT::f32) { 3120 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3121 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3122 DAG.getConstant(0, MVT::i32)); 3123 } else { 3124 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3125 } 3126 3127 return Res; 3128 } 3129 3130 // Bitcast operand 1 to i32. 3131 if (SrcVT == MVT::f64) 3132 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3133 &Tmp1, 1).getValue(1); 3134 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3135 3136 // Or in the signbit with integer operations. 3137 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3138 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3139 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3140 if (VT == MVT::f32) { 3141 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3142 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3143 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3144 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3145 } 3146 3147 // f64: Or the high part with signbit and then combine two parts. 
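// Worked example (illustrative): copysign(1.0, -2.0). Only the high word of the f64
// carries the sign: high(1.0) = 0x3FF00000 and the sign bit taken from -2.0 is
// 0x80000000, so the new high word is (0x3FF00000 & 0x7FFFFFFF) | 0x80000000 =
// 0xBFF00000, i.e. -1.0; the low word is reused unchanged.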
3148 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3149 &Tmp0, 1); 3150 SDValue Lo = Tmp0.getValue(0); 3151 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3152 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3153 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3154} 3155 3156SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3157 MachineFunction &MF = DAG.getMachineFunction(); 3158 MachineFrameInfo *MFI = MF.getFrameInfo(); 3159 MFI->setReturnAddressIsTaken(true); 3160 3161 EVT VT = Op.getValueType(); 3162 DebugLoc dl = Op.getDebugLoc(); 3163 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3164 if (Depth) { 3165 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3166 SDValue Offset = DAG.getConstant(4, MVT::i32); 3167 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3168 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3169 MachinePointerInfo(), false, false, 0); 3170 } 3171 3172 // Return LR, which contains the return address. Mark it an implicit live-in. 3173 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3174 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3175} 3176 3177SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3178 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3179 MFI->setFrameAddressIsTaken(true); 3180 3181 EVT VT = Op.getValueType(); 3182 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3183 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3184 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3185 ? ARM::R7 : ARM::R11; 3186 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3187 while (Depth--) 3188 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3189 MachinePointerInfo(), 3190 false, false, 0); 3191 return FrameAddr; 3192} 3193 3194/// ExpandBITCAST - If the target supports VFP, this function is called to 3195/// expand a bit convert where either the source or destination type is i64 to 3196/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3197/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3198/// vectors), since the legalizer won't know what to do with that. 3199static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3200 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3201 DebugLoc dl = N->getDebugLoc(); 3202 SDValue Op = N->getOperand(0); 3203 3204 // This function is only supposed to be called for i64 types, either as the 3205 // source or destination of the bit convert. 3206 EVT SrcVT = Op.getValueType(); 3207 EVT DstVT = N->getValueType(0); 3208 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3209 "ExpandBITCAST called for non-i64 type"); 3210 3211 // Turn i64->f64 into VMOVDRR. 3212 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3213 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3214 DAG.getConstant(0, MVT::i32)); 3215 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3216 DAG.getConstant(1, MVT::i32)); 3217 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3218 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3219 } 3220 3221 // Turn f64->i64 into VMOVRRD. 3222 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3223 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3224 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3225 // Merge the pieces into a single i64 value. 
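// Sketch (illustrative): the VMOVRRD above yields the two 32-bit halves of the double
// in a GPR pair (roughly "vmov r0, r1, d0"), and the BUILD_PAIR below glues those two
// i32 values back together as the illegal i64 result for the legalizer to consume.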
3226    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3227  } 3228 3229  return SDValue(); 3230} 3231 3232/// getZeroVector - Returns a vector of specified type with all zero elements. 3233/// Zero vectors are used to represent vector negation and in those cases 3234/// will be implemented with the NEON VNEG instruction.  However, VNEG does 3235/// not support i64 elements, so sometimes the zero vectors will need to be 3236/// explicitly constructed.  Regardless, use a canonical VMOV to create the 3237/// zero vector. 3238static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3239  assert(VT.isVector() && "Expected a vector type"); 3240  // The canonical modified immediate encoding of a zero vector is....0! 3241  SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3242  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3243  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3244  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3245} 3246 3247/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3248/// i32 values and takes a 2 x i32 value to shift plus a shift amount. 3249SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3250                                                SelectionDAG &DAG) const { 3251  assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3252  EVT VT = Op.getValueType(); 3253  unsigned VTBits = VT.getSizeInBits(); 3254  DebugLoc dl = Op.getDebugLoc(); 3255  SDValue ShOpLo = Op.getOperand(0); 3256  SDValue ShOpHi = Op.getOperand(1); 3257  SDValue ShAmt = Op.getOperand(2); 3258  SDValue ARMcc; 3259  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3260 3261  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3262 3263  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3264                                 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3265  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3266  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3267                                   DAG.getConstant(VTBits, MVT::i32)); 3268  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3269  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3270  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3271 3272  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3273  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3274                          ARMcc, DAG, dl); 3275  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3276  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3277                           CCR, Cmp); 3278 3279  SDValue Ops[2] = { Lo, Hi }; 3280  return DAG.getMergeValues(Ops, 2, dl); 3281} 3282 3283/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3284/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
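// Sketch of the selection below (illustrative): for a 64-bit value {Lo, Hi} shifted
// left by n, the high result is
//   n < 32  ->  (Hi << n) | (Lo >> (32 - n))
//   n >= 32 ->  Lo << (n - 32)
// and a CMOV on the sign of (n - 32) picks between the two; the low result is simply
// Lo << n, which the ARM register shift yields as 0 once the amount reaches 32.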
3285SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3286                                               SelectionDAG &DAG) const { 3287  assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3288  EVT VT = Op.getValueType(); 3289  unsigned VTBits = VT.getSizeInBits(); 3290  DebugLoc dl = Op.getDebugLoc(); 3291  SDValue ShOpLo = Op.getOperand(0); 3292  SDValue ShOpHi = Op.getOperand(1); 3293  SDValue ShAmt = Op.getOperand(2); 3294  SDValue ARMcc; 3295 3296  assert(Op.getOpcode() == ISD::SHL_PARTS); 3297  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3298                                 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3299  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3300  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3301                                   DAG.getConstant(VTBits, MVT::i32)); 3302  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3303  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3304 3305  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3306  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3307  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3308                          ARMcc, DAG, dl); 3309  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3310  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3311                           CCR, Cmp); 3312 3313  SDValue Ops[2] = { Lo, Hi }; 3314  return DAG.getMergeValues(Ops, 2, dl); 3315} 3316 3317SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3318                                            SelectionDAG &DAG) const { 3319  // The rounding mode is in bits 23:22 of the FPSCR. 3320  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0. 3321  // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3) 3322  // so that the shift and the AND get folded into a bitfield extract. 3323  DebugLoc dl = Op.getDebugLoc(); 3324  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3325                              DAG.getConstant(Intrinsic::arm_get_fpscr, 3326                                              MVT::i32)); 3327  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3328                                  DAG.getConstant(1U << 22, MVT::i32)); 3329  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3330                              DAG.getConstant(22, MVT::i32)); 3331  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3332                     DAG.getConstant(3, MVT::i32)); 3333} 3334 3335static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3336                         const ARMSubtarget *ST) { 3337  EVT VT = N->getValueType(0); 3338  DebugLoc dl = N->getDebugLoc(); 3339 3340  if (!ST->hasV6T2Ops()) 3341    return SDValue(); 3342 3343  SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3344  return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3345} 3346 3347static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3348                          const ARMSubtarget *ST) { 3349  EVT VT = N->getValueType(0); 3350  DebugLoc dl = N->getDebugLoc(); 3351 3352  if (!VT.isVector()) 3353    return SDValue(); 3354 3355  // Lower vector shifts on NEON to use VSHL. 3356  assert(ST->hasNEON() && "unexpected vector shift"); 3357 3358  // Left shifts translate directly to the vshiftu intrinsic. 3359  if (N->getOpcode() == ISD::SHL) 3360    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3361                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3362                       N->getOperand(0), N->getOperand(1)); 3363 3364  assert((N->getOpcode() == ISD::SRA || 3365          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3366 3367  // NEON uses the same intrinsics for both left and right shifts.  For 3368  // right shifts, the shift amounts are negative, so negate the vector of 3369  // shift amounts.
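// For example (illustrative): an SRL of a v4i32 by <1, 2, 3, 4> is emitted as the
// vshiftu intrinsic with counts <-1, -2, -3, -4>, formed below by subtracting the
// original counts from a zero vector.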
3370 EVT ShiftVT = N->getOperand(1).getValueType(); 3371 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3372 getZeroVector(ShiftVT, DAG, dl), 3373 N->getOperand(1)); 3374 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3375 Intrinsic::arm_neon_vshifts : 3376 Intrinsic::arm_neon_vshiftu); 3377 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3378 DAG.getConstant(vshiftInt, MVT::i32), 3379 N->getOperand(0), NegatedCount); 3380} 3381 3382static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3383 const ARMSubtarget *ST) { 3384 EVT VT = N->getValueType(0); 3385 DebugLoc dl = N->getDebugLoc(); 3386 3387 // We can get here for a node like i32 = ISD::SHL i32, i64 3388 if (VT != MVT::i64) 3389 return SDValue(); 3390 3391 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3392 "Unknown shift to lower!"); 3393 3394 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3395 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3396 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3397 return SDValue(); 3398 3399 // If we are in thumb mode, we don't have RRX. 3400 if (ST->isThumb1Only()) return SDValue(); 3401 3402 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3403 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3404 DAG.getConstant(0, MVT::i32)); 3405 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3406 DAG.getConstant(1, MVT::i32)); 3407 3408 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3409 // captures the result into a carry flag. 3410 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3411 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3412 3413 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3414 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3415 3416 // Merge the pieces into a single i64 value. 3417 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3418} 3419 3420static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3421 SDValue TmpOp0, TmpOp1; 3422 bool Invert = false; 3423 bool Swap = false; 3424 unsigned Opc = 0; 3425 3426 SDValue Op0 = Op.getOperand(0); 3427 SDValue Op1 = Op.getOperand(1); 3428 SDValue CC = Op.getOperand(2); 3429 EVT VT = Op.getValueType(); 3430 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3431 DebugLoc dl = Op.getDebugLoc(); 3432 3433 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3434 switch (SetCCOpcode) { 3435 default: llvm_unreachable("Illegal FP comparison"); break; 3436 case ISD::SETUNE: 3437 case ISD::SETNE: Invert = true; // Fallthrough 3438 case ISD::SETOEQ: 3439 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3440 case ISD::SETOLT: 3441 case ISD::SETLT: Swap = true; // Fallthrough 3442 case ISD::SETOGT: 3443 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3444 case ISD::SETOLE: 3445 case ISD::SETLE: Swap = true; // Fallthrough 3446 case ISD::SETOGE: 3447 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3448 case ISD::SETUGE: Swap = true; // Fallthrough 3449 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3450 case ISD::SETUGT: Swap = true; // Fallthrough 3451 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3452 case ISD::SETUEQ: Invert = true; // Fallthrough 3453 case ISD::SETONE: 3454 // Expand this to (OLT | OGT). 
3455 TmpOp0 = Op0; 3456 TmpOp1 = Op1; 3457 Opc = ISD::OR; 3458 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3459 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3460 break; 3461 case ISD::SETUO: Invert = true; // Fallthrough 3462 case ISD::SETO: 3463 // Expand this to (OLT | OGE). 3464 TmpOp0 = Op0; 3465 TmpOp1 = Op1; 3466 Opc = ISD::OR; 3467 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3468 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3469 break; 3470 } 3471 } else { 3472 // Integer comparisons. 3473 switch (SetCCOpcode) { 3474 default: llvm_unreachable("Illegal integer comparison"); break; 3475 case ISD::SETNE: Invert = true; 3476 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3477 case ISD::SETLT: Swap = true; 3478 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3479 case ISD::SETLE: Swap = true; 3480 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3481 case ISD::SETULT: Swap = true; 3482 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3483 case ISD::SETULE: Swap = true; 3484 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3485 } 3486 3487 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3488 if (Opc == ARMISD::VCEQ) { 3489 3490 SDValue AndOp; 3491 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3492 AndOp = Op0; 3493 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3494 AndOp = Op1; 3495 3496 // Ignore bitconvert. 3497 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3498 AndOp = AndOp.getOperand(0); 3499 3500 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3501 Opc = ARMISD::VTST; 3502 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3503 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3504 Invert = !Invert; 3505 } 3506 } 3507 } 3508 3509 if (Swap) 3510 std::swap(Op0, Op1); 3511 3512 // If one of the operands is a constant vector zero, attempt to fold the 3513 // comparison to a specialized compare-against-zero form. 3514 SDValue SingleOp; 3515 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3516 SingleOp = Op0; 3517 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3518 if (Opc == ARMISD::VCGE) 3519 Opc = ARMISD::VCLEZ; 3520 else if (Opc == ARMISD::VCGT) 3521 Opc = ARMISD::VCLTZ; 3522 SingleOp = Op1; 3523 } 3524 3525 SDValue Result; 3526 if (SingleOp.getNode()) { 3527 switch (Opc) { 3528 case ARMISD::VCEQ: 3529 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3530 case ARMISD::VCGE: 3531 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3532 case ARMISD::VCLEZ: 3533 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3534 case ARMISD::VCGT: 3535 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3536 case ARMISD::VCLTZ: 3537 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3538 default: 3539 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3540 } 3541 } else { 3542 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3543 } 3544 3545 if (Invert) 3546 Result = DAG.getNOT(dl, Result, VT); 3547 3548 return Result; 3549} 3550 3551/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3552/// valid vector constant for a NEON instruction with a "modified immediate" 3553/// operand (e.g., VMOV). If so, return the encoded value. 
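// Worked example (illustrative): a v4i32 splat of 0x00004500 matches the
// "Value = 0x0000nn00" pattern handled below, so it is encoded with OpCmode = 0x2 and
// an 8-bit immediate of 0x45; the returned constant packs that OpCmode/Imm pair for a
// single VMOV (modified immediate) instruction.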
3554static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3555 unsigned SplatBitSize, SelectionDAG &DAG, 3556 EVT &VT, bool is128Bits, NEONModImmType type) { 3557 unsigned OpCmode, Imm; 3558 3559 // SplatBitSize is set to the smallest size that splats the vector, so a 3560 // zero vector will always have SplatBitSize == 8. However, NEON modified 3561 // immediate instructions others than VMOV do not support the 8-bit encoding 3562 // of a zero vector, and the default encoding of zero is supposed to be the 3563 // 32-bit version. 3564 if (SplatBits == 0) 3565 SplatBitSize = 32; 3566 3567 switch (SplatBitSize) { 3568 case 8: 3569 if (type != VMOVModImm) 3570 return SDValue(); 3571 // Any 1-byte value is OK. Op=0, Cmode=1110. 3572 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3573 OpCmode = 0xe; 3574 Imm = SplatBits; 3575 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3576 break; 3577 3578 case 16: 3579 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3580 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3581 if ((SplatBits & ~0xff) == 0) { 3582 // Value = 0x00nn: Op=x, Cmode=100x. 3583 OpCmode = 0x8; 3584 Imm = SplatBits; 3585 break; 3586 } 3587 if ((SplatBits & ~0xff00) == 0) { 3588 // Value = 0xnn00: Op=x, Cmode=101x. 3589 OpCmode = 0xa; 3590 Imm = SplatBits >> 8; 3591 break; 3592 } 3593 return SDValue(); 3594 3595 case 32: 3596 // NEON's 32-bit VMOV supports splat values where: 3597 // * only one byte is nonzero, or 3598 // * the least significant byte is 0xff and the second byte is nonzero, or 3599 // * the least significant 2 bytes are 0xff and the third is nonzero. 3600 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3601 if ((SplatBits & ~0xff) == 0) { 3602 // Value = 0x000000nn: Op=x, Cmode=000x. 3603 OpCmode = 0; 3604 Imm = SplatBits; 3605 break; 3606 } 3607 if ((SplatBits & ~0xff00) == 0) { 3608 // Value = 0x0000nn00: Op=x, Cmode=001x. 3609 OpCmode = 0x2; 3610 Imm = SplatBits >> 8; 3611 break; 3612 } 3613 if ((SplatBits & ~0xff0000) == 0) { 3614 // Value = 0x00nn0000: Op=x, Cmode=010x. 3615 OpCmode = 0x4; 3616 Imm = SplatBits >> 16; 3617 break; 3618 } 3619 if ((SplatBits & ~0xff000000) == 0) { 3620 // Value = 0xnn000000: Op=x, Cmode=011x. 3621 OpCmode = 0x6; 3622 Imm = SplatBits >> 24; 3623 break; 3624 } 3625 3626 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3627 if (type == OtherModImm) return SDValue(); 3628 3629 if ((SplatBits & ~0xffff) == 0 && 3630 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3631 // Value = 0x0000nnff: Op=x, Cmode=1100. 3632 OpCmode = 0xc; 3633 Imm = SplatBits >> 8; 3634 SplatBits |= 0xff; 3635 break; 3636 } 3637 3638 if ((SplatBits & ~0xffffff) == 0 && 3639 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3640 // Value = 0x00nnffff: Op=x, Cmode=1101. 3641 OpCmode = 0xd; 3642 Imm = SplatBits >> 16; 3643 SplatBits |= 0xffff; 3644 break; 3645 } 3646 3647 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3648 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3649 // VMOV.I32. A (very) minor optimization would be to replicate the value 3650 // and fall through here to test for a valid 64-bit splat. But, then the 3651 // caller would also need to check and handle the change in size. 3652 return SDValue(); 3653 3654 case 64: { 3655 if (type != VMOVModImm) 3656 return SDValue(); 3657 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
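// Worked example (illustrative): the 64-bit splat 0x00FF00FF00FF00FF has bytes
// FF,00,FF,00,FF,00,FF,00 from least to most significant, so the loop below builds
// Imm = 0b01010101 (0x55) with OpCmode = 0x1e.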
3658 uint64_t BitMask = 0xff; 3659 uint64_t Val = 0; 3660 unsigned ImmMask = 1; 3661 Imm = 0; 3662 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3663 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3664 Val |= BitMask; 3665 Imm |= ImmMask; 3666 } else if ((SplatBits & BitMask) != 0) { 3667 return SDValue(); 3668 } 3669 BitMask <<= 8; 3670 ImmMask <<= 1; 3671 } 3672 // Op=1, Cmode=1110. 3673 OpCmode = 0x1e; 3674 SplatBits = Val; 3675 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3676 break; 3677 } 3678 3679 default: 3680 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3681 return SDValue(); 3682 } 3683 3684 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3685 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3686} 3687 3688static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3689 bool &ReverseVEXT, unsigned &Imm) { 3690 unsigned NumElts = VT.getVectorNumElements(); 3691 ReverseVEXT = false; 3692 3693 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3694 if (M[0] < 0) 3695 return false; 3696 3697 Imm = M[0]; 3698 3699 // If this is a VEXT shuffle, the immediate value is the index of the first 3700 // element. The other shuffle indices must be the successive elements after 3701 // the first one. 3702 unsigned ExpectedElt = Imm; 3703 for (unsigned i = 1; i < NumElts; ++i) { 3704 // Increment the expected index. If it wraps around, it may still be 3705 // a VEXT but the source vectors must be swapped. 3706 ExpectedElt += 1; 3707 if (ExpectedElt == NumElts * 2) { 3708 ExpectedElt = 0; 3709 ReverseVEXT = true; 3710 } 3711 3712 if (M[i] < 0) continue; // ignore UNDEF indices 3713 if (ExpectedElt != static_cast<unsigned>(M[i])) 3714 return false; 3715 } 3716 3717 // Adjust the index value if the source operands will be swapped. 3718 if (ReverseVEXT) 3719 Imm -= NumElts; 3720 3721 return true; 3722} 3723 3724/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3725/// instruction with the specified blocksize. (The order of the elements 3726/// within each block of the vector is reversed.) 3727static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3728 unsigned BlockSize) { 3729 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3730 "Only possible block sizes for VREV are: 16, 32, 64"); 3731 3732 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3733 if (EltSz == 64) 3734 return false; 3735 3736 unsigned NumElts = VT.getVectorNumElements(); 3737 unsigned BlockElts = M[0] + 1; 3738 // If the first shuffle index is UNDEF, be optimistic. 3739 if (M[0] < 0) 3740 BlockElts = BlockSize / EltSz; 3741 3742 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3743 return false; 3744 3745 for (unsigned i = 0; i < NumElts; ++i) { 3746 if (M[i] < 0) continue; // ignore UNDEF indices 3747 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3748 return false; 3749 } 3750 3751 return true; 3752} 3753 3754static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3755 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3756 // range, then 0 is placed into the resulting vector. So pretty much any mask 3757 // of 8 elements can work here. 
3758 return VT == MVT::v8i8 && M.size() == 8; 3759} 3760 3761static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3762 unsigned &WhichResult) { 3763 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3764 if (EltSz == 64) 3765 return false; 3766 3767 unsigned NumElts = VT.getVectorNumElements(); 3768 WhichResult = (M[0] == 0 ? 0 : 1); 3769 for (unsigned i = 0; i < NumElts; i += 2) { 3770 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3771 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3772 return false; 3773 } 3774 return true; 3775} 3776 3777/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3778/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3779/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3780static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3781 unsigned &WhichResult) { 3782 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3783 if (EltSz == 64) 3784 return false; 3785 3786 unsigned NumElts = VT.getVectorNumElements(); 3787 WhichResult = (M[0] == 0 ? 0 : 1); 3788 for (unsigned i = 0; i < NumElts; i += 2) { 3789 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3790 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3791 return false; 3792 } 3793 return true; 3794} 3795 3796static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3797 unsigned &WhichResult) { 3798 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3799 if (EltSz == 64) 3800 return false; 3801 3802 unsigned NumElts = VT.getVectorNumElements(); 3803 WhichResult = (M[0] == 0 ? 0 : 1); 3804 for (unsigned i = 0; i != NumElts; ++i) { 3805 if (M[i] < 0) continue; // ignore UNDEF indices 3806 if ((unsigned) M[i] != 2 * i + WhichResult) 3807 return false; 3808 } 3809 3810 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3811 if (VT.is64BitVector() && EltSz == 32) 3812 return false; 3813 3814 return true; 3815} 3816 3817/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3818/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3819/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3820static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3821 unsigned &WhichResult) { 3822 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3823 if (EltSz == 64) 3824 return false; 3825 3826 unsigned Half = VT.getVectorNumElements() / 2; 3827 WhichResult = (M[0] == 0 ? 0 : 1); 3828 for (unsigned j = 0; j != 2; ++j) { 3829 unsigned Idx = WhichResult; 3830 for (unsigned i = 0; i != Half; ++i) { 3831 int MIdx = M[i + j * Half]; 3832 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3833 return false; 3834 Idx += 2; 3835 } 3836 } 3837 3838 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3839 if (VT.is64BitVector() && EltSz == 32) 3840 return false; 3841 3842 return true; 3843} 3844 3845static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3846 unsigned &WhichResult) { 3847 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3848 if (EltSz == 64) 3849 return false; 3850 3851 unsigned NumElts = VT.getVectorNumElements(); 3852 WhichResult = (M[0] == 0 ? 0 : 1); 3853 unsigned Idx = WhichResult * NumElts / 2; 3854 for (unsigned i = 0; i != NumElts; i += 2) { 3855 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3856 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3857 return false; 3858 Idx += 1; 3859 } 3860 3861 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
3862 if (VT.is64BitVector() && EltSz == 32) 3863 return false; 3864 3865 return true; 3866} 3867 3868/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3869/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3870/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3871static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3872 unsigned &WhichResult) { 3873 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3874 if (EltSz == 64) 3875 return false; 3876 3877 unsigned NumElts = VT.getVectorNumElements(); 3878 WhichResult = (M[0] == 0 ? 0 : 1); 3879 unsigned Idx = WhichResult * NumElts / 2; 3880 for (unsigned i = 0; i != NumElts; i += 2) { 3881 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3882 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3883 return false; 3884 Idx += 1; 3885 } 3886 3887 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3888 if (VT.is64BitVector() && EltSz == 32) 3889 return false; 3890 3891 return true; 3892} 3893 3894// If N is an integer constant that can be moved into a register in one 3895// instruction, return an SDValue of such a constant (will become a MOV 3896// instruction). Otherwise return null. 3897static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3898 const ARMSubtarget *ST, DebugLoc dl) { 3899 uint64_t Val; 3900 if (!isa<ConstantSDNode>(N)) 3901 return SDValue(); 3902 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3903 3904 if (ST->isThumb1Only()) { 3905 if (Val <= 255 || ~Val <= 255) 3906 return DAG.getConstant(Val, MVT::i32); 3907 } else { 3908 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3909 return DAG.getConstant(Val, MVT::i32); 3910 } 3911 return SDValue(); 3912} 3913 3914// If this is a case we can't handle, return null and let the default 3915// expansion code take care of it. 3916SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3917 const ARMSubtarget *ST) const { 3918 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3919 DebugLoc dl = Op.getDebugLoc(); 3920 EVT VT = Op.getValueType(); 3921 3922 APInt SplatBits, SplatUndef; 3923 unsigned SplatBitSize; 3924 bool HasAnyUndefs; 3925 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3926 if (SplatBitSize <= 64) { 3927 // Check if an immediate VMOV works. 3928 EVT VmovVT; 3929 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3930 SplatUndef.getZExtValue(), SplatBitSize, 3931 DAG, VmovVT, VT.is128BitVector(), 3932 VMOVModImm); 3933 if (Val.getNode()) { 3934 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3935 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3936 } 3937 3938 // Try an immediate VMVN. 3939 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3940 ((1LL << SplatBitSize) - 1)); 3941 Val = isNEONModifiedImm(NegatedImm, 3942 SplatUndef.getZExtValue(), SplatBitSize, 3943 DAG, VmovVT, VT.is128BitVector(), 3944 VMVNModImm); 3945 if (Val.getNode()) { 3946 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3947 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3948 } 3949 } 3950 } 3951 3952 // Scan through the operands to see if only one value is used. 
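// For example (illustrative, for element sizes of 32 bits or less): a BUILD_VECTOR of
// four copies of a non-constant scalar becomes a single VDUP further down, while
// <x, undef, undef, undef> is matched as "only low element" and lowered to
// SCALAR_TO_VECTOR.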
3953 unsigned NumElts = VT.getVectorNumElements(); 3954 bool isOnlyLowElement = true; 3955 bool usesOnlyOneValue = true; 3956 bool isConstant = true; 3957 SDValue Value; 3958 for (unsigned i = 0; i < NumElts; ++i) { 3959 SDValue V = Op.getOperand(i); 3960 if (V.getOpcode() == ISD::UNDEF) 3961 continue; 3962 if (i > 0) 3963 isOnlyLowElement = false; 3964 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3965 isConstant = false; 3966 3967 if (!Value.getNode()) 3968 Value = V; 3969 else if (V != Value) 3970 usesOnlyOneValue = false; 3971 } 3972 3973 if (!Value.getNode()) 3974 return DAG.getUNDEF(VT); 3975 3976 if (isOnlyLowElement) 3977 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3978 3979 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3980 3981 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3982 // i32 and try again. 3983 if (usesOnlyOneValue && EltSize <= 32) { 3984 if (!isConstant) 3985 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3986 if (VT.getVectorElementType().isFloatingPoint()) { 3987 SmallVector<SDValue, 8> Ops; 3988 for (unsigned i = 0; i < NumElts; ++i) 3989 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3990 Op.getOperand(i))); 3991 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3992 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3993 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3994 if (Val.getNode()) 3995 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3996 } 3997 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3998 if (Val.getNode()) 3999 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4000 } 4001 4002 // If all elements are constants and the case above didn't get hit, fall back 4003 // to the default expansion, which will generate a load from the constant 4004 // pool. 4005 if (isConstant) 4006 return SDValue(); 4007 4008 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4009 if (NumElts >= 4) { 4010 SDValue shuffle = ReconstructShuffle(Op, DAG); 4011 if (shuffle != SDValue()) 4012 return shuffle; 4013 } 4014 4015 // Vectors with 32- or 64-bit elements can be built by directly assigning 4016 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4017 // will be legalized. 4018 if (EltSize >= 32) { 4019 // Do the expansion with floating-point types, since that is what the VFP 4020 // registers are defined to use, and since i64 is not legal. 4021 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4022 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4023 SmallVector<SDValue, 8> Ops; 4024 for (unsigned i = 0; i < NumElts; ++i) 4025 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4026 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4027 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4028 } 4029 4030 return SDValue(); 4031} 4032 4033// Gather data to see if the operation can be modelled as a 4034// shuffle in combination with VEXTs. 
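// Illustrative case: a v4i16 assembled from extract_vector_elt of lanes 2..5 of a
// single v8i16 source is recognized here and rebuilt as one VEXT of the two halves of
// that source, rather than being expanded into four separate lane extracts and inserts.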
4035SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4036                                              SelectionDAG &DAG) const { 4037  DebugLoc dl = Op.getDebugLoc(); 4038  EVT VT = Op.getValueType(); 4039  unsigned NumElts = VT.getVectorNumElements(); 4040 4041  SmallVector<SDValue, 2> SourceVecs; 4042  SmallVector<unsigned, 2> MinElts; 4043  SmallVector<unsigned, 2> MaxElts; 4044 4045  for (unsigned i = 0; i < NumElts; ++i) { 4046    SDValue V = Op.getOperand(i); 4047    if (V.getOpcode() == ISD::UNDEF) 4048      continue; 4049    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4050      // A shuffle can only come from building a vector from various 4051      // elements of other vectors. 4052      return SDValue(); 4053    } 4054 4055    // Record this extraction against the appropriate vector if possible... 4056    SDValue SourceVec = V.getOperand(0); 4057    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4058    bool FoundSource = false; 4059    for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4060      if (SourceVecs[j] == SourceVec) { 4061        if (MinElts[j] > EltNo) 4062          MinElts[j] = EltNo; 4063        if (MaxElts[j] < EltNo) 4064          MaxElts[j] = EltNo; 4065        FoundSource = true; 4066        break; 4067      } 4068    } 4069 4070    // Or record a new source if not... 4071    if (!FoundSource) { 4072      SourceVecs.push_back(SourceVec); 4073      MinElts.push_back(EltNo); 4074      MaxElts.push_back(EltNo); 4075    } 4076  } 4077 4078  // Currently only do something sane when at most two source vectors are 4079  // involved. 4080  if (SourceVecs.size() > 2) 4081    return SDValue(); 4082 4083  SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4084  int VEXTOffsets[2] = {0, 0}; 4085 4086  // This loop extracts the usage patterns of the source vectors 4087  // and prepares appropriate SDValues for a shuffle if possible. 4088  for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4089    if (SourceVecs[i].getValueType() == VT) { 4090      // No VEXT necessary 4091      ShuffleSrcs[i] = SourceVecs[i]; 4092      VEXTOffsets[i] = 0; 4093      continue; 4094    } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4095      // It probably isn't worth padding out a smaller vector just to 4096      // break it down again in a shuffle. 4097      return SDValue(); 4098    } 4099 4100    // Since only 64-bit and 128-bit vectors are legal on ARM and 4101    // we've eliminated the other cases...
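    // ...any remaining source vector must have exactly twice as many elements as the
    // result (e.g. a v8i16 source feeding a v4i16 build), which is what the assert
    // below checks before the source is split in half or fed to a VEXT.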
4102 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4103 "unexpected vector sizes in ReconstructShuffle"); 4104 4105 if (MaxElts[i] - MinElts[i] >= NumElts) { 4106 // Span too large for a VEXT to cope 4107 return SDValue(); 4108 } 4109 4110 if (MinElts[i] >= NumElts) { 4111 // The extraction can just take the second half 4112 VEXTOffsets[i] = NumElts; 4113 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4114 SourceVecs[i], 4115 DAG.getIntPtrConstant(NumElts)); 4116 } else if (MaxElts[i] < NumElts) { 4117 // The extraction can just take the first half 4118 VEXTOffsets[i] = 0; 4119 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4120 SourceVecs[i], 4121 DAG.getIntPtrConstant(0)); 4122 } else { 4123 // An actual VEXT is needed 4124 VEXTOffsets[i] = MinElts[i]; 4125 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4126 SourceVecs[i], 4127 DAG.getIntPtrConstant(0)); 4128 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4129 SourceVecs[i], 4130 DAG.getIntPtrConstant(NumElts)); 4131 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4132 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4133 } 4134 } 4135 4136 SmallVector<int, 8> Mask; 4137 4138 for (unsigned i = 0; i < NumElts; ++i) { 4139 SDValue Entry = Op.getOperand(i); 4140 if (Entry.getOpcode() == ISD::UNDEF) { 4141 Mask.push_back(-1); 4142 continue; 4143 } 4144 4145 SDValue ExtractVec = Entry.getOperand(0); 4146 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4147 .getOperand(1))->getSExtValue(); 4148 if (ExtractVec == SourceVecs[0]) { 4149 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4150 } else { 4151 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4152 } 4153 } 4154 4155 // Final check before we try to produce nonsense... 4156 if (isShuffleMaskLegal(Mask, VT)) 4157 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4158 &Mask[0]); 4159 4160 return SDValue(); 4161} 4162 4163/// isShuffleMaskLegal - Targets can use this to indicate that they only 4164/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4165/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4166/// are assumed to be legal. 4167bool 4168ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4169 EVT VT) const { 4170 if (VT.getVectorNumElements() == 4 && 4171 (VT.is128BitVector() || VT.is64BitVector())) { 4172 unsigned PFIndexes[4]; 4173 for (unsigned i = 0; i != 4; ++i) { 4174 if (M[i] < 0) 4175 PFIndexes[i] = 8; 4176 else 4177 PFIndexes[i] = M[i]; 4178 } 4179 4180 // Compute the index in the perfect shuffle table. 
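    // Worked example (illustrative): the 4-element mask <1, 0, 3, 2> (undef lanes
    // would use 8) indexes the table at 1*729 + 0*81 + 3*9 + 2 = 758; the top two bits
    // of the table entry give the cost that is compared against 4 below.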
4181 unsigned PFTableIndex = 4182 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4183 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4184 unsigned Cost = (PFEntry >> 30); 4185 4186 if (Cost <= 4) 4187 return true; 4188 } 4189 4190 bool ReverseVEXT; 4191 unsigned Imm, WhichResult; 4192 4193 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4194 return (EltSize >= 32 || 4195 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4196 isVREVMask(M, VT, 64) || 4197 isVREVMask(M, VT, 32) || 4198 isVREVMask(M, VT, 16) || 4199 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4200 isVTBLMask(M, VT) || 4201 isVTRNMask(M, VT, WhichResult) || 4202 isVUZPMask(M, VT, WhichResult) || 4203 isVZIPMask(M, VT, WhichResult) || 4204 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4205 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4206 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4207} 4208 4209/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4210/// the specified operations to build the shuffle. 4211static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4212 SDValue RHS, SelectionDAG &DAG, 4213 DebugLoc dl) { 4214 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4215 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4216 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4217 4218 enum { 4219 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4220 OP_VREV, 4221 OP_VDUP0, 4222 OP_VDUP1, 4223 OP_VDUP2, 4224 OP_VDUP3, 4225 OP_VEXT1, 4226 OP_VEXT2, 4227 OP_VEXT3, 4228 OP_VUZPL, // VUZP, left result 4229 OP_VUZPR, // VUZP, right result 4230 OP_VZIPL, // VZIP, left result 4231 OP_VZIPR, // VZIP, right result 4232 OP_VTRNL, // VTRN, left result 4233 OP_VTRNR // VTRN, right result 4234 }; 4235 4236 if (OpNum == OP_COPY) { 4237 if (LHSID == (1*9+2)*9+3) return LHS; 4238 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4239 return RHS; 4240 } 4241 4242 SDValue OpLHS, OpRHS; 4243 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4244 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4245 EVT VT = OpLHS.getValueType(); 4246 4247 switch (OpNum) { 4248 default: llvm_unreachable("Unknown shuffle opcode!"); 4249 case OP_VREV: 4250 // VREV divides the vector in half and swaps within the half. 
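    // For example (illustrative): VREV64 applied to a v4i32 <a, b, c, d> swaps the
    // elements inside each 64-bit half, giving <b, a, d, c>; the element type below
    // picks between VREV64, VREV32 and VREV16.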
4251    if (VT.getVectorElementType() == MVT::i32 || 4252        VT.getVectorElementType() == MVT::f32) 4253      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4254    // vrev <4 x i16> -> VREV32 4255    if (VT.getVectorElementType() == MVT::i16) 4256      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4257    // vrev <4 x i8> -> VREV16 4258    assert(VT.getVectorElementType() == MVT::i8); 4259    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4260  case OP_VDUP0: 4261  case OP_VDUP1: 4262  case OP_VDUP2: 4263  case OP_VDUP3: 4264    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4265                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4266  case OP_VEXT1: 4267  case OP_VEXT2: 4268  case OP_VEXT3: 4269    return DAG.getNode(ARMISD::VEXT, dl, VT, 4270                       OpLHS, OpRHS, 4271                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4272  case OP_VUZPL: 4273  case OP_VUZPR: 4274    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4275                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4276  case OP_VZIPL: 4277  case OP_VZIPR: 4278    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4279                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4280  case OP_VTRNL: 4281  case OP_VTRNR: 4282    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4283                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4284  } 4285} 4286 4287static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4288                                       SmallVectorImpl<int> &ShuffleMask, 4289                                       SelectionDAG &DAG) { 4290  // Check to see if we can use the VTBL instruction. 4291  SDValue V1 = Op.getOperand(0); 4292  SDValue V2 = Op.getOperand(1); 4293  DebugLoc DL = Op.getDebugLoc(); 4294 4295  SmallVector<SDValue, 8> VTBLMask; 4296  for (SmallVectorImpl<int>::iterator 4297         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4298    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4299 4300  if (V2.getNode()->getOpcode() == ISD::UNDEF) 4301    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4302                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4303                                   &VTBLMask[0], 8)); 4304 4305  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4306                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4307                                 &VTBLMask[0], 8)); 4308} 4309 4310static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4311  SDValue V1 = Op.getOperand(0); 4312  SDValue V2 = Op.getOperand(1); 4313  DebugLoc dl = Op.getDebugLoc(); 4314  EVT VT = Op.getValueType(); 4315  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4316  SmallVector<int, 8> ShuffleMask; 4317 4318  // Convert shuffles that are directly supported on NEON to target-specific 4319  // DAG nodes, instead of keeping them as shuffles and matching them again 4320  // during code selection.  This is more efficient and avoids the possibility 4321  // of inconsistencies between legalization and selection. 4322  // FIXME: floating-point vectors should be canonicalized to integer vectors 4323  // of the same type so that they get CSEd properly. 4324  SVN->getMask(ShuffleMask); 4325 4326  unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4327  if (EltSize <= 32) { 4328    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4329      int Lane = SVN->getSplatIndex(); 4330      // If this is an undef splat, generate it via "just" vdup, if possible.
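      // For example (illustrative): a splat of lane 2 of a v4i32 becomes
      // VDUPLANE(V1, 2); if every mask entry is undef the splat index is -1, so lane 0
      // is used below, and a splat of a freshly inserted scalar (SCALAR_TO_VECTOR at
      // lane 0) is turned into a plain VDUP of that scalar.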
4331 if (Lane == -1) Lane = 0; 4332 4333 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4334 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4335 } 4336 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4337 DAG.getConstant(Lane, MVT::i32)); 4338 } 4339 4340 bool ReverseVEXT; 4341 unsigned Imm; 4342 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4343 if (ReverseVEXT) 4344 std::swap(V1, V2); 4345 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4346 DAG.getConstant(Imm, MVT::i32)); 4347 } 4348 4349 if (isVREVMask(ShuffleMask, VT, 64)) 4350 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4351 if (isVREVMask(ShuffleMask, VT, 32)) 4352 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4353 if (isVREVMask(ShuffleMask, VT, 16)) 4354 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4355 4356 // Check for Neon shuffles that modify both input vectors in place. 4357 // If both results are used, i.e., if there are two shuffles with the same 4358 // source operands and with masks corresponding to both results of one of 4359 // these operations, DAG memoization will ensure that a single node is 4360 // used for both shuffles. 4361 unsigned WhichResult; 4362 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4363 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4364 V1, V2).getValue(WhichResult); 4365 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4366 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4367 V1, V2).getValue(WhichResult); 4368 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4369 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4370 V1, V2).getValue(WhichResult); 4371 4372 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4373 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4374 V1, V1).getValue(WhichResult); 4375 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4376 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4377 V1, V1).getValue(WhichResult); 4378 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4379 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4380 V1, V1).getValue(WhichResult); 4381 } 4382 4383 // If the shuffle is not directly supported and it has 4 elements, use 4384 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4385 unsigned NumElts = VT.getVectorNumElements(); 4386 if (NumElts == 4) { 4387 unsigned PFIndexes[4]; 4388 for (unsigned i = 0; i != 4; ++i) { 4389 if (ShuffleMask[i] < 0) 4390 PFIndexes[i] = 8; 4391 else 4392 PFIndexes[i] = ShuffleMask[i]; 4393 } 4394 4395 // Compute the index in the perfect shuffle table. 4396 unsigned PFTableIndex = 4397 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4398 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4399 unsigned Cost = (PFEntry >> 30); 4400 4401 if (Cost <= 4) 4402 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4403 } 4404 4405 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4406 if (EltSize >= 32) { 4407 // Do the expansion with floating-point types, since that is what the VFP 4408 // registers are defined to use, and since i64 is not legal. 
4409 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4410 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4411 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4412 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4413 SmallVector<SDValue, 8> Ops; 4414 for (unsigned i = 0; i < NumElts; ++i) { 4415 if (ShuffleMask[i] < 0) 4416 Ops.push_back(DAG.getUNDEF(EltVT)); 4417 else 4418 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4419 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4420 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4421 MVT::i32))); 4422 } 4423 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4424 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4425 } 4426 4427 if (VT == MVT::v8i8) { 4428 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4429 if (NewOp.getNode()) 4430 return NewOp; 4431 } 4432 4433 return SDValue(); 4434} 4435 4436static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4437 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4438 SDValue Lane = Op.getOperand(1); 4439 if (!isa<ConstantSDNode>(Lane)) 4440 return SDValue(); 4441 4442 SDValue Vec = Op.getOperand(0); 4443 if (Op.getValueType() == MVT::i32 && 4444 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4445 DebugLoc dl = Op.getDebugLoc(); 4446 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4447 } 4448 4449 return Op; 4450} 4451 4452static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4453 // The only time a CONCAT_VECTORS operation can have legal types is when 4454 // two 64-bit vectors are concatenated to a 128-bit vector. 4455 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4456 "unexpected CONCAT_VECTORS"); 4457 DebugLoc dl = Op.getDebugLoc(); 4458 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4459 SDValue Op0 = Op.getOperand(0); 4460 SDValue Op1 = Op.getOperand(1); 4461 if (Op0.getOpcode() != ISD::UNDEF) 4462 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4463 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4464 DAG.getIntPtrConstant(0)); 4465 if (Op1.getOpcode() != ISD::UNDEF) 4466 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4467 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4468 DAG.getIntPtrConstant(1)); 4469 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4470} 4471 4472/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4473/// element has been zero/sign-extended, depending on the isSigned parameter, 4474/// from an integer type half its size. 4475static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4476 bool isSigned) { 4477 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4478 EVT VT = N->getValueType(0); 4479 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4480 SDNode *BVN = N->getOperand(0).getNode(); 4481 if (BVN->getValueType(0) != MVT::v4i32 || 4482 BVN->getOpcode() != ISD::BUILD_VECTOR) 4483 return false; 4484 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4485 unsigned HiElt = 1 - LoElt; 4486 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4487 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4488 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4489 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4490 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4491 return false; 4492 if (isSigned) { 4493 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4494 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4495 return true; 4496 } else { 4497 if (Hi0->isNullValue() && Hi1->isNullValue()) 4498 return true; 4499 } 4500 return false; 4501 } 4502 4503 if (N->getOpcode() != ISD::BUILD_VECTOR) 4504 return false; 4505 4506 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4507 SDNode *Elt = N->getOperand(i).getNode(); 4508 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4509 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4510 unsigned HalfSize = EltSize / 2; 4511 if (isSigned) { 4512 int64_t SExtVal = C->getSExtValue(); 4513 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4514 return false; 4515 } else { 4516 if ((C->getZExtValue() >> HalfSize) != 0) 4517 return false; 4518 } 4519 continue; 4520 } 4521 return false; 4522 } 4523 4524 return true; 4525} 4526 4527/// isSignExtended - Check if a node is a vector value that is sign-extended 4528/// or a constant BUILD_VECTOR with sign-extended elements. 4529static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4530 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4531 return true; 4532 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4533 return true; 4534 return false; 4535} 4536 4537/// isZeroExtended - Check if a node is a vector value that is zero-extended 4538/// or a constant BUILD_VECTOR with zero-extended elements. 4539static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4540 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4541 return true; 4542 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4543 return true; 4544 return false; 4545} 4546 4547/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4548/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4549static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4550 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4551 return N->getOperand(0); 4552 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4553 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4554 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4555 LD->isNonTemporal(), LD->getAlignment()); 4556 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4557 // have been legalized as a BITCAST from v4i32. 4558 if (N->getOpcode() == ISD::BITCAST) { 4559 SDNode *BVN = N->getOperand(0).getNode(); 4560 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4561 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4562 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4563 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4564 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4565 } 4566 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
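  // For example (illustrative constants): a v4i32 BUILD_VECTOR of
  // <7, 1, 0, 42>, already known to fit in 16 bits per element, is rebuilt
  // as a v4i16 BUILD_VECTOR of the truncated constants so VMULL can widen
  // it again as part of the multiply.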
4567 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4568 EVT VT = N->getValueType(0); 4569 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4570 unsigned NumElts = VT.getVectorNumElements(); 4571 MVT TruncVT = MVT::getIntegerVT(EltSize); 4572 SmallVector<SDValue, 8> Ops; 4573 for (unsigned i = 0; i != NumElts; ++i) { 4574 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4575 const APInt &CInt = C->getAPIntValue(); 4576 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4577 } 4578 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4579 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4580} 4581 4582static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4583 unsigned Opcode = N->getOpcode(); 4584 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4585 SDNode *N0 = N->getOperand(0).getNode(); 4586 SDNode *N1 = N->getOperand(1).getNode(); 4587 return N0->hasOneUse() && N1->hasOneUse() && 4588 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4589 } 4590 return false; 4591} 4592 4593static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4594 unsigned Opcode = N->getOpcode(); 4595 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4596 SDNode *N0 = N->getOperand(0).getNode(); 4597 SDNode *N1 = N->getOperand(1).getNode(); 4598 return N0->hasOneUse() && N1->hasOneUse() && 4599 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4600 } 4601 return false; 4602} 4603 4604static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4605 // Multiplications are only custom-lowered for 128-bit vectors so that 4606 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4607 EVT VT = Op.getValueType(); 4608 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4609 SDNode *N0 = Op.getOperand(0).getNode(); 4610 SDNode *N1 = Op.getOperand(1).getNode(); 4611 unsigned NewOpc = 0; 4612 bool isMLA = false; 4613 bool isN0SExt = isSignExtended(N0, DAG); 4614 bool isN1SExt = isSignExtended(N1, DAG); 4615 if (isN0SExt && isN1SExt) 4616 NewOpc = ARMISD::VMULLs; 4617 else { 4618 bool isN0ZExt = isZeroExtended(N0, DAG); 4619 bool isN1ZExt = isZeroExtended(N1, DAG); 4620 if (isN0ZExt && isN1ZExt) 4621 NewOpc = ARMISD::VMULLu; 4622 else if (isN1SExt || isN1ZExt) { 4623 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4624 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4625 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4626 NewOpc = ARMISD::VMULLs; 4627 isMLA = true; 4628 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4629 NewOpc = ARMISD::VMULLu; 4630 isMLA = true; 4631 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4632 std::swap(N0, N1); 4633 NewOpc = ARMISD::VMULLu; 4634 isMLA = true; 4635 } 4636 } 4637 4638 if (!NewOpc) { 4639 if (VT == MVT::v2i64) 4640 // Fall through to expand this. It is not legal. 4641 return SDValue(); 4642 else 4643 // Other vector multiplications are legal. 4644 return Op; 4645 } 4646 } 4647 4648 // Legalize to a VMULL instruction. 4649 DebugLoc DL = Op.getDebugLoc(); 4650 SDValue Op0; 4651 SDValue Op1 = SkipExtension(N1, DAG); 4652 if (!isMLA) { 4653 Op0 = SkipExtension(N0, DAG); 4654 assert(Op0.getValueType().is64BitVector() && 4655 Op1.getValueType().is64BitVector() && 4656 "unexpected types for extended operands to VMULL"); 4657 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4658 } 4659 4660 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4661 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4662 // vmull q0, d4, d6 4663 // vmlal q0, d5, d6 4664 // is faster than 4665 // vaddl q0, d4, d5 4666 // vmovl q1, d6 4667 // vmul q0, q0, q1 4668 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4669 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4670 EVT Op1VT = Op1.getValueType(); 4671 return DAG.getNode(N0->getOpcode(), DL, VT, 4672 DAG.getNode(NewOpc, DL, VT, 4673 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4674 DAG.getNode(NewOpc, DL, VT, 4675 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4676} 4677 4678static SDValue 4679LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4680 // Convert to float 4681 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4682 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4683 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4684 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4685 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4686 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4687 // Get reciprocal estimate. 4688 // float4 recip = vrecpeq_f32(yf); 4689 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4690 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4691 // Because char has a smaller range than uchar, we can actually get away 4692 // without any newton steps. This requires that we use a weird bias 4693 // of 0xb000, however (again, this has been exhaustively tested). 4694 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4695 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4696 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4697 Y = DAG.getConstant(0xb000, MVT::i32); 4698 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4699 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4700 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4701 // Convert back to short. 4702 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4703 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4704 return X; 4705} 4706 4707static SDValue 4708LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4709 SDValue N2; 4710 // Convert to float. 4711 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4712 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4713 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4714 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4715 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4716 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4717 4718 // Use reciprocal estimate and one refinement step. 4719 // float4 recip = vrecpeq_f32(yf); 4720 // recip *= vrecpsq_f32(yf, recip); 4721 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4722 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4723 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4724 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4725 N1, N2); 4726 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4727 // Because short has a smaller range than ushort, we can actually get away 4728 // with only a single newton step. This requires that we use a weird bias 4729 // of 89, however (again, this has been exhaustively tested). 
4730 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4731 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4732 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4733 N1 = DAG.getConstant(0x89, MVT::i32); 4734 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4735 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4736 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4737 // Convert back to integer and return. 4738 // return vmovn_s32(vcvt_s32_f32(result)); 4739 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4740 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4741 return N0; 4742} 4743 4744static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4745 EVT VT = Op.getValueType(); 4746 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4747 "unexpected type for custom-lowering ISD::SDIV"); 4748 4749 DebugLoc dl = Op.getDebugLoc(); 4750 SDValue N0 = Op.getOperand(0); 4751 SDValue N1 = Op.getOperand(1); 4752 SDValue N2, N3; 4753 4754 if (VT == MVT::v8i8) { 4755 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4756 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4757 4758 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4759 DAG.getIntPtrConstant(4)); 4760 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4761 DAG.getIntPtrConstant(4)); 4762 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4763 DAG.getIntPtrConstant(0)); 4764 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4765 DAG.getIntPtrConstant(0)); 4766 4767 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4768 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4769 4770 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4771 N0 = LowerCONCAT_VECTORS(N0, DAG); 4772 4773 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4774 return N0; 4775 } 4776 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4777} 4778 4779static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4780 EVT VT = Op.getValueType(); 4781 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4782 "unexpected type for custom-lowering ISD::UDIV"); 4783 4784 DebugLoc dl = Op.getDebugLoc(); 4785 SDValue N0 = Op.getOperand(0); 4786 SDValue N1 = Op.getOperand(1); 4787 SDValue N2, N3; 4788 4789 if (VT == MVT::v8i8) { 4790 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4791 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4792 4793 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4794 DAG.getIntPtrConstant(4)); 4795 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4796 DAG.getIntPtrConstant(4)); 4797 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4798 DAG.getIntPtrConstant(0)); 4799 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4800 DAG.getIntPtrConstant(0)); 4801 4802 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4803 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4804 4805 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4806 N0 = LowerCONCAT_VECTORS(N0, DAG); 4807 4808 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4809 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4810 N0); 4811 return N0; 4812 } 4813 4814 // v4i16 sdiv ... Convert to float. 
4815 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 4816 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 4817 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 4818 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 4819 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4820 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4821 4822 // Use reciprocal estimate and two refinement steps. 4823 // float4 recip = vrecpeq_f32(yf); 4824 // recip *= vrecpsq_f32(yf, recip); 4825 // recip *= vrecpsq_f32(yf, recip); 4826 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4827 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1); 4828 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4829 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4830 BN1, N2); 4831 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4832 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4833 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4834 BN1, N2); 4835 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4836 // Simply multiplying by the reciprocal estimate can leave us a few ulps 4837 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 4838 // and that it will never cause us to return an answer too large). 4839 // float4 result = as_float4(as_int4(xf*recip) + 2); 4840 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4841 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4842 N1 = DAG.getConstant(2, MVT::i32); 4843 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4844 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4845 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4846 // Convert back to integer and return. 4847 // return vmovn_u32(vcvt_s32_f32(result)); 4848 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4849 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4850 return N0; 4851} 4852 4853static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 4854 EVT VT = Op.getNode()->getValueType(0); 4855 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 4856 4857 unsigned Opc; 4858 bool ExtraOp = false; 4859 switch (Op.getOpcode()) { 4860 default: assert(0 && "Invalid code"); 4861 case ISD::ADDC: Opc = ARMISD::ADDC; break; 4862 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 4863 case ISD::SUBC: Opc = ARMISD::SUBC; break; 4864 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 4865 } 4866 4867 if (!ExtraOp) 4868 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 4869 Op.getOperand(1)); 4870 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 4871 Op.getOperand(1), Op.getOperand(2)); 4872} 4873 4874static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 4875 // Monotonic load/store is legal for all targets 4876 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 4877 return Op; 4878 4879 // Aquire/Release load/store is not legal for targets without a 4880 // dmb or equivalent available. 
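  // Returning an empty SDValue from a Custom lowering hook makes the
  // legalizer fall back to its normal expansion for the node; for these
  // atomics that roughly means rewriting the load/store as a full atomic
  // operation (e.g. a compare-and-swap) that does carry the ordering.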
4881 return SDValue(); 4882} 4883 4884 4885static void 4886ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, 4887 SelectionDAG &DAG, unsigned NewOp) { 4888 EVT T = Node->getValueType(0); 4889 DebugLoc dl = Node->getDebugLoc(); 4890 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 4891 4892 SmallVector<SDValue, 6> Ops; 4893 Ops.push_back(Node->getOperand(0)); // Chain 4894 Ops.push_back(Node->getOperand(1)); // Ptr 4895 // Low part of Val1 4896 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4897 Node->getOperand(2), DAG.getIntPtrConstant(0))); 4898 // High part of Val1 4899 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4900 Node->getOperand(2), DAG.getIntPtrConstant(1))); 4901 if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) { 4902 // Low part of Val2 4903 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4904 Node->getOperand(3), DAG.getIntPtrConstant(0))); 4905 // High part of Val2 4906 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 4907 Node->getOperand(3), DAG.getIntPtrConstant(1))); 4908 } 4909 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4910 SDValue Result = 4911 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64, 4912 cast<MemSDNode>(Node)->getMemOperand()); 4913 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) }; 4914 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 4915 Results.push_back(Result.getValue(2)); 4916} 4917 4918SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4919 switch (Op.getOpcode()) { 4920 default: llvm_unreachable("Don't know how to custom lower this!"); 4921 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4922 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4923 case ISD::GlobalAddress: 4924 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4925 LowerGlobalAddressELF(Op, DAG); 4926 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4927 case ISD::SELECT: return LowerSELECT(Op, DAG); 4928 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4929 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4930 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4931 case ISD::VASTART: return LowerVASTART(Op, DAG); 4932 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4933 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 4934 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4935 case ISD::SINT_TO_FP: 4936 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4937 case ISD::FP_TO_SINT: 4938 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4939 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4940 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4941 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4942 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4943 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4944 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4945 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4946 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4947 Subtarget); 4948 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4949 case ISD::SHL: 4950 case ISD::SRL: 4951 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4952 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4953 case ISD::SRL_PARTS: 4954 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4955 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4956 case ISD::SETCC: return LowerVSETCC(Op, DAG); 4957 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4958 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4959 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4960 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4961 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4962 case ISD::MUL: return LowerMUL(Op, DAG); 4963 case ISD::SDIV: return LowerSDIV(Op, DAG); 4964 case ISD::UDIV: return LowerUDIV(Op, DAG); 4965 case ISD::ADDC: 4966 case ISD::ADDE: 4967 case ISD::SUBC: 4968 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 4969 case ISD::ATOMIC_LOAD: 4970 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 4971 } 4972 return SDValue(); 4973} 4974 4975/// ReplaceNodeResults - Replace the results of node with an illegal result 4976/// type with new values built out of custom code. 
4977void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4978 SmallVectorImpl<SDValue>&Results, 4979 SelectionDAG &DAG) const { 4980 SDValue Res; 4981 switch (N->getOpcode()) { 4982 default: 4983 llvm_unreachable("Don't know how to custom expand this!"); 4984 break; 4985 case ISD::BITCAST: 4986 Res = ExpandBITCAST(N, DAG); 4987 break; 4988 case ISD::SRL: 4989 case ISD::SRA: 4990 Res = Expand64BitShift(N, DAG, Subtarget); 4991 break; 4992 case ISD::ATOMIC_LOAD_ADD: 4993 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 4994 return; 4995 case ISD::ATOMIC_LOAD_AND: 4996 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 4997 return; 4998 case ISD::ATOMIC_LOAD_NAND: 4999 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5000 return; 5001 case ISD::ATOMIC_LOAD_OR: 5002 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5003 return; 5004 case ISD::ATOMIC_LOAD_SUB: 5005 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5006 return; 5007 case ISD::ATOMIC_LOAD_XOR: 5008 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5009 return; 5010 case ISD::ATOMIC_SWAP: 5011 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5012 return; 5013 case ISD::ATOMIC_CMP_SWAP: 5014 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5015 return; 5016 } 5017 if (Res.getNode()) 5018 Results.push_back(Res); 5019} 5020 5021//===----------------------------------------------------------------------===// 5022// ARM Scheduler Hooks 5023//===----------------------------------------------------------------------===// 5024 5025MachineBasicBlock * 5026ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5027 MachineBasicBlock *BB, 5028 unsigned Size) const { 5029 unsigned dest = MI->getOperand(0).getReg(); 5030 unsigned ptr = MI->getOperand(1).getReg(); 5031 unsigned oldval = MI->getOperand(2).getReg(); 5032 unsigned newval = MI->getOperand(3).getReg(); 5033 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5034 DebugLoc dl = MI->getDebugLoc(); 5035 bool isThumb2 = Subtarget->isThumb2(); 5036 5037 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5038 unsigned scratch = 5039 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 5040 : ARM::GPRRegisterClass); 5041 5042 if (isThumb2) { 5043 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5044 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 5045 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 5046 } 5047 5048 unsigned ldrOpc, strOpc; 5049 switch (Size) { 5050 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5051 case 1: 5052 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5053 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5054 break; 5055 case 2: 5056 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5057 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5058 break; 5059 case 4: 5060 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5061 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5062 break; 5063 } 5064 5065 MachineFunction *MF = BB->getParent(); 5066 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5067 MachineFunction::iterator It = BB; 5068 ++It; // insert the new blocks after the current block 5069 5070 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5071 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5072 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5073 MF->insert(It, loop1MBB); 5074 MF->insert(It, loop2MBB); 5075 MF->insert(It, exitMBB); 5076 5077 // Transfer the remainder of BB and its successor edges to exitMBB. 5078 exitMBB->splice(exitMBB->begin(), BB, 5079 llvm::next(MachineBasicBlock::iterator(MI)), 5080 BB->end()); 5081 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5082 5083 // thisMBB: 5084 // ... 5085 // fallthrough --> loop1MBB 5086 BB->addSuccessor(loop1MBB); 5087 5088 // loop1MBB: 5089 // ldrex dest, [ptr] 5090 // cmp dest, oldval 5091 // bne exitMBB 5092 BB = loop1MBB; 5093 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5094 if (ldrOpc == ARM::t2LDREX) 5095 MIB.addImm(0); 5096 AddDefaultPred(MIB); 5097 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5098 .addReg(dest).addReg(oldval)); 5099 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5100 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5101 BB->addSuccessor(loop2MBB); 5102 BB->addSuccessor(exitMBB); 5103 5104 // loop2MBB: 5105 // strex scratch, newval, [ptr] 5106 // cmp scratch, #0 5107 // bne loop1MBB 5108 BB = loop2MBB; 5109 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5110 if (strOpc == ARM::t2STREX) 5111 MIB.addImm(0); 5112 AddDefaultPred(MIB); 5113 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5114 .addReg(scratch).addImm(0)); 5115 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5116 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5117 BB->addSuccessor(loop1MBB); 5118 BB->addSuccessor(exitMBB); 5119 5120 // exitMBB: 5121 // ... 5122 BB = exitMBB; 5123 5124 MI->eraseFromParent(); // The instruction is gone now. 5125 5126 return BB; 5127} 5128 5129MachineBasicBlock * 5130ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5131 unsigned Size, unsigned BinOpcode) const { 5132 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5133 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5134 5135 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5136 MachineFunction *MF = BB->getParent(); 5137 MachineFunction::iterator It = BB; 5138 ++It; 5139 5140 unsigned dest = MI->getOperand(0).getReg(); 5141 unsigned ptr = MI->getOperand(1).getReg(); 5142 unsigned incr = MI->getOperand(2).getReg(); 5143 DebugLoc dl = MI->getDebugLoc(); 5144 bool isThumb2 = Subtarget->isThumb2(); 5145 5146 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5147 if (isThumb2) { 5148 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5149 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5150 } 5151 5152 unsigned ldrOpc, strOpc; 5153 switch (Size) { 5154 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5155 case 1: 5156 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5157 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5158 break; 5159 case 2: 5160 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5161 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5162 break; 5163 case 4: 5164 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5165 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5166 break; 5167 } 5168 5169 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5170 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5171 MF->insert(It, loopMBB); 5172 MF->insert(It, exitMBB); 5173 5174 // Transfer the remainder of BB and its successor edges to exitMBB. 5175 exitMBB->splice(exitMBB->begin(), BB, 5176 llvm::next(MachineBasicBlock::iterator(MI)), 5177 BB->end()); 5178 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5179 5180 TargetRegisterClass *TRC = 5181 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5182 unsigned scratch = MRI.createVirtualRegister(TRC); 5183 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5184 5185 // thisMBB: 5186 // ... 5187 // fallthrough --> loopMBB 5188 BB->addSuccessor(loopMBB); 5189 5190 // loopMBB: 5191 // ldrex dest, ptr 5192 // <binop> scratch2, dest, incr 5193 // strex scratch, scratch2, ptr 5194 // cmp scratch, #0 5195 // bne- loopMBB 5196 // fallthrough --> exitMBB 5197 BB = loopMBB; 5198 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5199 if (ldrOpc == ARM::t2LDREX) 5200 MIB.addImm(0); 5201 AddDefaultPred(MIB); 5202 if (BinOpcode) { 5203 // operand order needs to go the other way for NAND 5204 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5205 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5206 addReg(incr).addReg(dest)).addReg(0); 5207 else 5208 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5209 addReg(dest).addReg(incr)).addReg(0); 5210 } 5211 5212 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5213 if (strOpc == ARM::t2STREX) 5214 MIB.addImm(0); 5215 AddDefaultPred(MIB); 5216 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5217 .addReg(scratch).addImm(0)); 5218 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5219 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5220 5221 BB->addSuccessor(loopMBB); 5222 BB->addSuccessor(exitMBB); 5223 5224 // exitMBB: 5225 // ... 5226 BB = exitMBB; 5227 5228 MI->eraseFromParent(); // The instruction is gone now. 5229 5230 return BB; 5231} 5232 5233MachineBasicBlock * 5234ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5235 MachineBasicBlock *BB, 5236 unsigned Size, 5237 bool signExtend, 5238 ARMCC::CondCodes Cond) const { 5239 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5240 5241 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5242 MachineFunction *MF = BB->getParent(); 5243 MachineFunction::iterator It = BB; 5244 ++It; 5245 5246 unsigned dest = MI->getOperand(0).getReg(); 5247 unsigned ptr = MI->getOperand(1).getReg(); 5248 unsigned incr = MI->getOperand(2).getReg(); 5249 unsigned oldval = dest; 5250 DebugLoc dl = MI->getDebugLoc(); 5251 bool isThumb2 = Subtarget->isThumb2(); 5252 5253 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5254 if (isThumb2) { 5255 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5256 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5257 } 5258 5259 unsigned ldrOpc, strOpc, extendOpc; 5260 switch (Size) { 5261 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5262 case 1: 5263 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5264 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5265 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 5266 break; 5267 case 2: 5268 ldrOpc = isThumb2 ? 
ARM::t2LDREXH : ARM::LDREXH; 5269 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5270 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5271 break; 5272 case 4: 5273 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5274 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5275 extendOpc = 0; 5276 break; 5277 } 5278 5279 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5280 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5281 MF->insert(It, loopMBB); 5282 MF->insert(It, exitMBB); 5283 5284 // Transfer the remainder of BB and its successor edges to exitMBB. 5285 exitMBB->splice(exitMBB->begin(), BB, 5286 llvm::next(MachineBasicBlock::iterator(MI)), 5287 BB->end()); 5288 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5289 5290 TargetRegisterClass *TRC = 5291 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5292 unsigned scratch = MRI.createVirtualRegister(TRC); 5293 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5294 5295 // thisMBB: 5296 // ... 5297 // fallthrough --> loopMBB 5298 BB->addSuccessor(loopMBB); 5299 5300 // loopMBB: 5301 // ldrex dest, ptr 5302 // (sign extend dest, if required) 5303 // cmp dest, incr 5304 // cmov.cond scratch2, dest, incr 5305 // strex scratch, scratch2, ptr 5306 // cmp scratch, #0 5307 // bne- loopMBB 5308 // fallthrough --> exitMBB 5309 BB = loopMBB; 5310 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5311 if (ldrOpc == ARM::t2LDREX) 5312 MIB.addImm(0); 5313 AddDefaultPred(MIB); 5314 5315 // Sign extend the value, if necessary. 5316 if (signExtend && extendOpc) { 5317 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5318 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5319 .addReg(dest) 5320 .addImm(0)); 5321 } 5322 5323 // Build compare and cmov instructions. 5324 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5325 .addReg(oldval).addReg(incr)); 5326 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5327 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5328 5329 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5330 if (strOpc == ARM::t2STREX) 5331 MIB.addImm(0); 5332 AddDefaultPred(MIB); 5333 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5334 .addReg(scratch).addImm(0)); 5335 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5336 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5337 5338 BB->addSuccessor(loopMBB); 5339 BB->addSuccessor(exitMBB); 5340 5341 // exitMBB: 5342 // ... 5343 BB = exitMBB; 5344 5345 MI->eraseFromParent(); // The instruction is gone now. 5346 5347 return BB; 5348} 5349 5350MachineBasicBlock * 5351ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5352 unsigned Op1, unsigned Op2, 5353 bool NeedsCarry, bool IsCmpxchg) const { 5354 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
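  // Illustrative shape of the loop built below for ATOMADD6432 (NeedsCarry):
  //   ldrexd r2, r3, ptr
  //   adds   r0, r2, vallo
  //   adc    r1, r3, valhi
  //   strexd storesuccess, r0, r1, ptr
  //   cmp    storesuccess, #0
  //   bne    loop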
5355 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5356 5357 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5358 MachineFunction *MF = BB->getParent(); 5359 MachineFunction::iterator It = BB; 5360 ++It; 5361 5362 unsigned destlo = MI->getOperand(0).getReg(); 5363 unsigned desthi = MI->getOperand(1).getReg(); 5364 unsigned ptr = MI->getOperand(2).getReg(); 5365 unsigned vallo = MI->getOperand(3).getReg(); 5366 unsigned valhi = MI->getOperand(4).getReg(); 5367 DebugLoc dl = MI->getDebugLoc(); 5368 bool isThumb2 = Subtarget->isThumb2(); 5369 5370 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5371 if (isThumb2) { 5372 MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); 5373 MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); 5374 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5375 } 5376 5377 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5378 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5379 5380 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5381 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5382 if (IsCmpxchg) { 5383 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5384 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5385 } 5386 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5387 MF->insert(It, loopMBB); 5388 if (IsCmpxchg) { 5389 MF->insert(It, contBB); 5390 MF->insert(It, cont2BB); 5391 } 5392 MF->insert(It, exitMBB); 5393 5394 // Transfer the remainder of BB and its successor edges to exitMBB. 5395 exitMBB->splice(exitMBB->begin(), BB, 5396 llvm::next(MachineBasicBlock::iterator(MI)), 5397 BB->end()); 5398 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5399 5400 TargetRegisterClass *TRC = 5401 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5402 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5403 5404 // thisMBB: 5405 // ... 5406 // fallthrough --> loopMBB 5407 BB->addSuccessor(loopMBB); 5408 5409 // loopMBB: 5410 // ldrexd r2, r3, ptr 5411 // <binopa> r0, r2, incr 5412 // <binopb> r1, r3, incr 5413 // strexd storesuccess, r0, r1, ptr 5414 // cmp storesuccess, #0 5415 // bne- loopMBB 5416 // fallthrough --> exitMBB 5417 // 5418 // Note that the registers are explicitly specified because there is not any 5419 // way to force the register allocator to allocate a register pair. 5420 // 5421 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5422 // need to properly enforce the restriction that the two output registers 5423 // for ldrexd must be different. 5424 BB = loopMBB; 5425 // Load 5426 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5427 .addReg(ARM::R2, RegState::Define) 5428 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5429 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5430 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5431 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5432 5433 if (IsCmpxchg) { 5434 // Add early exit 5435 for (unsigned i = 0; i < 2; i++) { 5436 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5437 ARM::CMPrr)) 5438 .addReg(i == 0 ? destlo : desthi) 5439 .addReg(i == 0 ? vallo : valhi)); 5440 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5441 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5442 BB->addSuccessor(exitMBB); 5443 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5444 BB = (i == 0 ? 
contBB : cont2BB); 5445 } 5446 5447 // Copy to physregs for strexd 5448 unsigned setlo = MI->getOperand(5).getReg(); 5449 unsigned sethi = MI->getOperand(6).getReg(); 5450 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5451 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5452 } else if (Op1) { 5453 // Perform binary operation 5454 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5455 .addReg(destlo).addReg(vallo)) 5456 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5457 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5458 .addReg(desthi).addReg(valhi)).addReg(0); 5459 } else { 5460 // Copy to physregs for strexd 5461 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5462 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5463 } 5464 5465 // Store 5466 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5467 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5468 // Cmp+jump 5469 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5470 .addReg(storesuccess).addImm(0)); 5471 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5472 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5473 5474 BB->addSuccessor(loopMBB); 5475 BB->addSuccessor(exitMBB); 5476 5477 // exitMBB: 5478 // ... 5479 BB = exitMBB; 5480 5481 MI->eraseFromParent(); // The instruction is gone now. 5482 5483 return BB; 5484} 5485 5486/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5487/// registers the function context. 5488void ARMTargetLowering:: 5489SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5490 MachineBasicBlock *DispatchBB, int FI) const { 5491 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5492 DebugLoc dl = MI->getDebugLoc(); 5493 MachineFunction *MF = MBB->getParent(); 5494 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5495 MachineConstantPool *MCP = MF->getConstantPool(); 5496 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5497 const Function *F = MF->getFunction(); 5498 5499 bool isThumb = Subtarget->isThumb(); 5500 bool isThumb2 = Subtarget->isThumb2(); 5501 5502 unsigned PCLabelId = AFI->createPICLabelUId(); 5503 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5504 ARMConstantPoolValue *CPV = 5505 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5506 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5507 5508 const TargetRegisterClass *TRC = 5509 isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5510 5511 // Grab constant pool and fixed stack memory operands. 5512 MachineMemOperand *CPMMO = 5513 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5514 MachineMemOperand::MOLoad, 4, 4); 5515 5516 MachineMemOperand *FIMMOSt = 5517 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5518 MachineMemOperand::MOStore, 4, 4); 5519 5520 // Load the address of the dispatch MBB into the jump buffer. 5521 if (isThumb2) { 5522 // Incoming value: jbuf 5523 // ldr.n r5, LCPI1_1 5524 // orr r5, r5, #1 5525 // add r5, pc 5526 // str r5, [$jbuf, #+4] ; &jbuf[1] 5527 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5528 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 5529 .addConstantPoolIndex(CPI) 5530 .addMemOperand(CPMMO)); 5531 // Set the low bit because of thumb mode. 
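    // (The dispatch address ends up being branched to indirectly, and on
    // Thumb an indirect branch target needs bit 0 set to stay in Thumb
    // state.)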
5532 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5533 AddDefaultCC( 5534 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 5535 .addReg(NewVReg1, RegState::Kill) 5536 .addImm(0x01))); 5537 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5538 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 5539 .addReg(NewVReg2, RegState::Kill) 5540 .addImm(PCLabelId); 5541 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 5542 .addReg(NewVReg3, RegState::Kill) 5543 .addFrameIndex(FI) 5544 .addImm(36) // &jbuf[1] :: pc 5545 .addMemOperand(FIMMOSt)); 5546 } else if (isThumb) { 5547 // Incoming value: jbuf 5548 // ldr.n r1, LCPI1_4 5549 // add r1, pc 5550 // mov r2, #1 5551 // orrs r1, r2 5552 // add r2, $jbuf, #+4 ; &jbuf[1] 5553 // str r1, [r2] 5554 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5555 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 5556 .addConstantPoolIndex(CPI) 5557 .addMemOperand(CPMMO)); 5558 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5559 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 5560 .addReg(NewVReg1, RegState::Kill) 5561 .addImm(PCLabelId); 5562 // Set the low bit because of thumb mode. 5563 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5564 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 5565 .addReg(ARM::CPSR, RegState::Define) 5566 .addImm(1)); 5567 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5568 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 5569 .addReg(ARM::CPSR, RegState::Define) 5570 .addReg(NewVReg2, RegState::Kill) 5571 .addReg(NewVReg3, RegState::Kill)); 5572 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5573 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 5574 .addFrameIndex(FI) 5575 .addImm(36)); // &jbuf[1] :: pc 5576 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 5577 .addReg(NewVReg4, RegState::Kill) 5578 .addReg(NewVReg5, RegState::Kill) 5579 .addImm(0) 5580 .addMemOperand(FIMMOSt)); 5581 } else { 5582 // Incoming value: jbuf 5583 // ldr r1, LCPI1_1 5584 // add r1, pc, r1 5585 // str r1, [$jbuf, #+4] ; &jbuf[1] 5586 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5587 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 5588 .addConstantPoolIndex(CPI) 5589 .addImm(0) 5590 .addMemOperand(CPMMO)); 5591 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5592 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 5593 .addReg(NewVReg1, RegState::Kill) 5594 .addImm(PCLabelId)); 5595 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 5596 .addReg(NewVReg2, RegState::Kill) 5597 .addFrameIndex(FI) 5598 .addImm(36) // &jbuf[1] :: pc 5599 .addMemOperand(FIMMOSt)); 5600 } 5601} 5602 5603MachineBasicBlock *ARMTargetLowering:: 5604EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 5605 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5606 DebugLoc dl = MI->getDebugLoc(); 5607 MachineFunction *MF = MBB->getParent(); 5608 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5609 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5610 MachineFrameInfo *MFI = MF->getFrameInfo(); 5611 int FI = MFI->getFunctionContextIndex(); 5612 5613 const TargetRegisterClass *TRC = 5614 Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5615 5616 // Get a mapping of the call site numbers to all of the landing pads they're 5617 // associated with. 
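  // (Each invoke is tagged with a call-site number by the EH machinery; the
  // dispatch block built below loads that number from the function context
  // and uses it as the jump-table index that selects the landing pad.)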
5618 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 5619 unsigned MaxCSNum = 0; 5620 MachineModuleInfo &MMI = MF->getMMI(); 5621 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) { 5622 if (!BB->isLandingPad()) continue; 5623 5624 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 5625 // pad. 5626 for (MachineBasicBlock::iterator 5627 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 5628 if (!II->isEHLabel()) continue; 5629 5630 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 5631 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 5632 5633 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 5634 for (SmallVectorImpl<unsigned>::iterator 5635 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 5636 CSI != CSE; ++CSI) { 5637 CallSiteNumToLPad[*CSI].push_back(BB); 5638 MaxCSNum = std::max(MaxCSNum, *CSI); 5639 } 5640 break; 5641 } 5642 } 5643 5644 // Get an ordered list of the machine basic blocks for the jump table. 5645 std::vector<MachineBasicBlock*> LPadList; 5646 LPadList.reserve(CallSiteNumToLPad.size()); 5647 for (unsigned I = 1; I <= MaxCSNum; ++I) { 5648 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 5649 for (SmallVectorImpl<MachineBasicBlock*>::iterator 5650 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) 5651 LPadList.push_back(*II); 5652 } 5653 5654 assert(!LPadList.empty() && 5655 "No landing pad destinations for the dispatch jump table!"); 5656 5657 // Create the jump table and associated information. 5658 MachineJumpTableInfo *JTI = 5659 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 5660 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 5661 unsigned UId = AFI->createJumpTableUId(); 5662 5663 // Create the MBBs for the dispatch code. 5664 5665 // Shove the dispatch's address into the return slot in the function context. 5666 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 5667 DispatchBB->setIsLandingPad(); 5668 MBB->addSuccessor(DispatchBB); 5669 5670 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 5671 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 5672 DispatchBB->addSuccessor(TrapBB); 5673 5674 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 5675 DispatchBB->addSuccessor(DispContBB); 5676 5677 // Insert and renumber MBBs. 5678 MachineBasicBlock *Last = &MF->back(); 5679 MF->insert(MF->end(), DispatchBB); 5680 MF->insert(MF->end(), DispContBB); 5681 MF->insert(MF->end(), TrapBB); 5682 MF->RenumberBlocks(Last); 5683 5684 // Insert code into the entry block that creates and registers the function 5685 // context. 
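  // (The function context here is the SjLj jump buffer plus the bookkeeping
  // fields the runtime reads; FI is the fixed stack slot holding it.)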
5686 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 5687 5688 MachineMemOperand *FIMMOLd = 5689 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5690 MachineMemOperand::MOLoad | 5691 MachineMemOperand::MOVolatile, 4, 4); 5692 5693 if (Subtarget->isThumb2()) { 5694 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5695 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 5696 .addFrameIndex(FI) 5697 .addImm(4) 5698 .addMemOperand(FIMMOLd)); 5699 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 5700 .addReg(NewVReg1) 5701 .addImm(LPadList.size())); 5702 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 5703 .addMBB(TrapBB) 5704 .addImm(ARMCC::HI) 5705 .addReg(ARM::CPSR); 5706 5707 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5708 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg2) 5709 .addJumpTableIndex(MJTI) 5710 .addImm(UId)); 5711 5712 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5713 AddDefaultCC( 5714 AddDefaultPred( 5715 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg3) 5716 .addReg(NewVReg2, RegState::Kill) 5717 .addReg(NewVReg1) 5718 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5719 5720 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 5721 .addReg(NewVReg3, RegState::Kill) 5722 .addReg(NewVReg1) 5723 .addJumpTableIndex(MJTI) 5724 .addImm(UId); 5725 } else if (Subtarget->isThumb()) { 5726 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5727 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 5728 .addFrameIndex(FI) 5729 .addImm(1) 5730 .addMemOperand(FIMMOLd)); 5731 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 5732 .addReg(NewVReg1) 5733 .addImm(LPadList.size())); 5734 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 5735 .addMBB(TrapBB) 5736 .addImm(ARMCC::HI) 5737 .addReg(ARM::CPSR); 5738 5739 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5740 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 5741 .addReg(ARM::CPSR, RegState::Define) 5742 .addReg(NewVReg1) 5743 .addImm(2)); 5744 5745 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5746 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 5747 .addJumpTableIndex(MJTI) 5748 .addImm(UId)); 5749 5750 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5751 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 5752 .addReg(ARM::CPSR, RegState::Define) 5753 .addReg(NewVReg2, RegState::Kill) 5754 .addReg(NewVReg3)); 5755 5756 MachineMemOperand *JTMMOLd = 5757 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5758 MachineMemOperand::MOLoad, 4, 4); 5759 5760 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5761 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 5762 .addReg(NewVReg4, RegState::Kill) 5763 .addImm(0) 5764 .addMemOperand(JTMMOLd)); 5765 5766 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 5767 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 5768 .addReg(ARM::CPSR, RegState::Define) 5769 .addReg(NewVReg5, RegState::Kill) 5770 .addReg(NewVReg3)); 5771 5772 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 5773 .addReg(NewVReg6, RegState::Kill) 5774 .addJumpTableIndex(MJTI) 5775 .addImm(UId); 5776 } else { 5777 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5778 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 5779 .addFrameIndex(FI) 5780 .addImm(4) 5781 .addMemOperand(FIMMOLd)); 5782 AddDefaultPred(BuildMI(DispatchBB, dl, 
TII->get(ARM::CMPri)) 5783 .addReg(NewVReg1) 5784 .addImm(LPadList.size())); 5785 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 5786 .addMBB(TrapBB) 5787 .addImm(ARMCC::HI) 5788 .addReg(ARM::CPSR); 5789 5790 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5791 AddDefaultCC( 5792 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg2) 5793 .addReg(NewVReg1) 5794 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5795 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5796 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg3) 5797 .addJumpTableIndex(MJTI) 5798 .addImm(UId)); 5799 5800 MachineMemOperand *JTMMOLd = 5801 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5802 MachineMemOperand::MOLoad, 4, 4); 5803 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5804 AddDefaultPred( 5805 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg4) 5806 .addReg(NewVReg2, RegState::Kill) 5807 .addReg(NewVReg3) 5808 .addImm(0) 5809 .addMemOperand(JTMMOLd)); 5810 5811 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 5812 .addReg(NewVReg4, RegState::Kill) 5813 .addReg(NewVReg3) 5814 .addJumpTableIndex(MJTI) 5815 .addImm(UId); 5816 } 5817 5818 // Add the jump table entries as successors to the MBB. 5819 for (std::vector<MachineBasicBlock*>::iterator 5820 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) 5821 DispContBB->addSuccessor(*I); 5822 5823 // The instruction is gone now. 5824 MI->eraseFromParent(); 5825 5826 return MBB; 5827} 5828 5829static 5830MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 5831 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 5832 E = MBB->succ_end(); I != E; ++I) 5833 if (*I != Succ) 5834 return *I; 5835 llvm_unreachable("Expecting a BB with two successors!"); 5836} 5837 5838MachineBasicBlock * 5839ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 5840 MachineBasicBlock *BB) const { 5841 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5842 DebugLoc dl = MI->getDebugLoc(); 5843 bool isThumb2 = Subtarget->isThumb2(); 5844 switch (MI->getOpcode()) { 5845 default: { 5846 MI->dump(); 5847 llvm_unreachable("Unexpected instr type to insert"); 5848 } 5849 // The Thumb2 pre-indexed stores have the same MI operands, they just 5850 // define them differently in the .td files from the isel patterns, so 5851 // they need pseudos. 5852 case ARM::t2STR_preidx: 5853 MI->setDesc(TII->get(ARM::t2STR_PRE)); 5854 return BB; 5855 case ARM::t2STRB_preidx: 5856 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 5857 return BB; 5858 case ARM::t2STRH_preidx: 5859 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 5860 return BB; 5861 5862 case ARM::STRi_preidx: 5863 case ARM::STRBi_preidx: { 5864 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 5865 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 5866 // Decode the offset. 
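    // The operand is an addrmode2 immediate that packs the add/sub flag with
    // the offset value, so e.g. a pre-index of #-4 decodes to isSub == true
    // and an offset of 4, giving Offset = -4 below.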
5867 unsigned Offset = MI->getOperand(4).getImm(); 5868 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 5869 Offset = ARM_AM::getAM2Offset(Offset); 5870 if (isSub) 5871 Offset = -Offset; 5872 5873 MachineMemOperand *MMO = *MI->memoperands_begin(); 5874 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 5875 .addOperand(MI->getOperand(0)) // Rn_wb 5876 .addOperand(MI->getOperand(1)) // Rt 5877 .addOperand(MI->getOperand(2)) // Rn 5878 .addImm(Offset) // offset (skip GPR==zero_reg) 5879 .addOperand(MI->getOperand(5)) // pred 5880 .addOperand(MI->getOperand(6)) 5881 .addMemOperand(MMO); 5882 MI->eraseFromParent(); 5883 return BB; 5884 } 5885 case ARM::STRr_preidx: 5886 case ARM::STRBr_preidx: 5887 case ARM::STRH_preidx: { 5888 unsigned NewOpc; 5889 switch (MI->getOpcode()) { 5890 default: llvm_unreachable("unexpected opcode!"); 5891 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 5892 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 5893 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 5894 } 5895 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 5896 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 5897 MIB.addOperand(MI->getOperand(i)); 5898 MI->eraseFromParent(); 5899 return BB; 5900 } 5901 case ARM::ATOMIC_LOAD_ADD_I8: 5902 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5903 case ARM::ATOMIC_LOAD_ADD_I16: 5904 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5905 case ARM::ATOMIC_LOAD_ADD_I32: 5906 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 5907 5908 case ARM::ATOMIC_LOAD_AND_I8: 5909 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5910 case ARM::ATOMIC_LOAD_AND_I16: 5911 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5912 case ARM::ATOMIC_LOAD_AND_I32: 5913 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5914 5915 case ARM::ATOMIC_LOAD_OR_I8: 5916 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5917 case ARM::ATOMIC_LOAD_OR_I16: 5918 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5919 case ARM::ATOMIC_LOAD_OR_I32: 5920 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5921 5922 case ARM::ATOMIC_LOAD_XOR_I8: 5923 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5924 case ARM::ATOMIC_LOAD_XOR_I16: 5925 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5926 case ARM::ATOMIC_LOAD_XOR_I32: 5927 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5928 5929 case ARM::ATOMIC_LOAD_NAND_I8: 5930 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5931 case ARM::ATOMIC_LOAD_NAND_I16: 5932 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5933 case ARM::ATOMIC_LOAD_NAND_I32: 5934 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5935 5936 case ARM::ATOMIC_LOAD_SUB_I8: 5937 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5938 case ARM::ATOMIC_LOAD_SUB_I16: 5939 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5940 case ARM::ATOMIC_LOAD_SUB_I32: 5941 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 5942 5943 case ARM::ATOMIC_LOAD_MIN_I8: 5944 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 5945 case ARM::ATOMIC_LOAD_MIN_I16: 5946 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 5947 case ARM::ATOMIC_LOAD_MIN_I32: 5948 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 5949 5950 case ARM::ATOMIC_LOAD_MAX_I8: 5951 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 5952 case ARM::ATOMIC_LOAD_MAX_I16: 5953 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 5954 case ARM::ATOMIC_LOAD_MAX_I32: 5955 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 5956 5957 case ARM::ATOMIC_LOAD_UMIN_I8: 5958 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 5959 case ARM::ATOMIC_LOAD_UMIN_I16: 5960 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 5961 case ARM::ATOMIC_LOAD_UMIN_I32: 5962 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 5963 5964 case ARM::ATOMIC_LOAD_UMAX_I8: 5965 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 5966 case ARM::ATOMIC_LOAD_UMAX_I16: 5967 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 5968 case ARM::ATOMIC_LOAD_UMAX_I32: 5969 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 5970 5971 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 5972 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 5973 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 5974 5975 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 5976 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 5977 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 5978 5979 5980 case ARM::ATOMADD6432: 5981 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 5982 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 5983 /*NeedsCarry*/ true); 5984 case ARM::ATOMSUB6432: 5985 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 5986 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 5987 /*NeedsCarry*/ true); 5988 case ARM::ATOMOR6432: 5989 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 5990 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 5991 case ARM::ATOMXOR6432: 5992 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 5993 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5994 case ARM::ATOMAND6432: 5995 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 5996 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 5997 case ARM::ATOMSWAP6432: 5998 return EmitAtomicBinary64(MI, BB, 0, 0, false); 5999 case ARM::ATOMCMPXCHG6432: 6000 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6001 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6002 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6003 6004 case ARM::tMOVCCr_pseudo: { 6005 // To "insert" a SELECT_CC instruction, we actually have to insert the 6006 // diamond control-flow pattern. The incoming instruction knows the 6007 // destination vreg to set, the condition code register to branch on, the 6008 // true/false values to select between, and a branch opcode to use. 6009 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6010 MachineFunction::iterator It = BB; 6011 ++It; 6012 6013 // thisMBB: 6014 // ... 6015 // TrueVal = ... 
6016 // cmpTY ccX, r1, r2 6017 // bCC copy1MBB 6018 // fallthrough --> copy0MBB 6019 MachineBasicBlock *thisMBB = BB; 6020 MachineFunction *F = BB->getParent(); 6021 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6022 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6023 F->insert(It, copy0MBB); 6024 F->insert(It, sinkMBB); 6025 6026 // Transfer the remainder of BB and its successor edges to sinkMBB. 6027 sinkMBB->splice(sinkMBB->begin(), BB, 6028 llvm::next(MachineBasicBlock::iterator(MI)), 6029 BB->end()); 6030 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6031 6032 BB->addSuccessor(copy0MBB); 6033 BB->addSuccessor(sinkMBB); 6034 6035 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6036 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6037 6038 // copy0MBB: 6039 // %FalseValue = ... 6040 // # fallthrough to sinkMBB 6041 BB = copy0MBB; 6042 6043 // Update machine-CFG edges 6044 BB->addSuccessor(sinkMBB); 6045 6046 // sinkMBB: 6047 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6048 // ... 6049 BB = sinkMBB; 6050 BuildMI(*BB, BB->begin(), dl, 6051 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 6052 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6053 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6054 6055 MI->eraseFromParent(); // The pseudo instruction is gone now. 6056 return BB; 6057 } 6058 6059 case ARM::BCCi64: 6060 case ARM::BCCZi64: { 6061 // If there is an unconditional branch to the other successor, remove it. 6062 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 6063 6064 // Compare both parts that make up the double comparison separately for 6065 // equality. 6066 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 6067 6068 unsigned LHS1 = MI->getOperand(1).getReg(); 6069 unsigned LHS2 = MI->getOperand(2).getReg(); 6070 if (RHSisZero) { 6071 AddDefaultPred(BuildMI(BB, dl, 6072 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6073 .addReg(LHS1).addImm(0)); 6074 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6075 .addReg(LHS2).addImm(0) 6076 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6077 } else { 6078 unsigned RHS1 = MI->getOperand(3).getReg(); 6079 unsigned RHS2 = MI->getOperand(4).getReg(); 6080 AddDefaultPred(BuildMI(BB, dl, 6081 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6082 .addReg(LHS1).addReg(RHS1)); 6083 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6084 .addReg(LHS2).addReg(RHS2) 6085 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6086 } 6087 6088 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 6089 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 6090 if (MI->getOperand(0).getImm() == ARMCC::NE) 6091 std::swap(destMBB, exitMBB); 6092 6093 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6094 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 6095 if (isThumb2) 6096 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 6097 else 6098 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 6099 6100 MI->eraseFromParent(); // The pseudo instruction is gone now. 
6101 return BB; 6102 } 6103 } 6104} 6105 6106void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 6107 SDNode *Node) const { 6108 const MCInstrDesc &MCID = MI->getDesc(); 6109 if (!MCID.hasPostISelHook()) { 6110 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 6111 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 6112 return; 6113 } 6114 6115 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 6116 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 6117 // operand is still set to noreg. If needed, set the optional operand's 6118 // register to CPSR, and remove the redundant implicit def. 6119 // 6120 // e.g. ADCS (...opt:%noreg, CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 6121 6122 // Rename pseudo opcodes. 6123 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 6124 if (NewOpc) { 6125 const ARMBaseInstrInfo *TII = 6126 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 6127 MI->setDesc(TII->get(NewOpc)); 6128 } 6129 unsigned ccOutIdx = MCID.getNumOperands() - 1; 6130 6131 // Any ARM instruction that sets the 's' bit should specify an optional 6132 // "cc_out" operand in the last operand position. 6133 if (!MCID.hasOptionalDef() || !MCID.OpInfo[ccOutIdx].isOptionalDef()) { 6134 assert(!NewOpc && "Optional cc_out operand required"); 6135 return; 6136 } 6137 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 6138 // since we already have an optional CPSR def. 6139 bool definesCPSR = false; 6140 bool deadCPSR = false; 6141 for (unsigned i = MCID.getNumOperands(), e = MI->getNumOperands(); 6142 i != e; ++i) { 6143 const MachineOperand &MO = MI->getOperand(i); 6144 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 6145 definesCPSR = true; 6146 if (MO.isDead()) 6147 deadCPSR = true; 6148 MI->RemoveOperand(i); 6149 break; 6150 } 6151 } 6152 if (!definesCPSR) { 6153 assert(!NewOpc && "Optional cc_out operand required"); 6154 return; 6155 } 6156 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 6157 if (deadCPSR) { 6158 assert(!MI->getOperand(ccOutIdx).getReg() && 6159 "expect uninitialized optional cc_out operand"); 6160 return; 6161 } 6162 6163 // If this instruction was defined with an optional CPSR def and its dag node 6164 // had a live implicit CPSR def, then activate the optional CPSR def. 6165 MachineOperand &MO = MI->getOperand(ccOutIdx); 6166 MO.setReg(ARM::CPSR); 6167 MO.setIsDef(true); 6168} 6169 6170//===----------------------------------------------------------------------===// 6171// ARM Optimization Hooks 6172//===----------------------------------------------------------------------===// 6173 6174static 6175SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 6176 TargetLowering::DAGCombinerInfo &DCI) { 6177 SelectionDAG &DAG = DCI.DAG; 6178 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6179 EVT VT = N->getValueType(0); 6180 unsigned Opc = N->getOpcode(); 6181 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 6182 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 6183 SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2); 6184 ISD::CondCode CC = ISD::SETCC_INVALID; 6185 6186 if (isSlctCC) { 6187 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 6188 } else { 6189 SDValue CCOp = Slct.getOperand(0); 6190 if (CCOp.getOpcode() == ISD::SETCC) 6191 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 6192 } 6193 6194 bool DoXform = false; 6195 bool InvCC = false; 6196 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 6197 "Bad input!"); 6198 6199 if (LHS.getOpcode() == ISD::Constant && 6200 cast<ConstantSDNode>(LHS)->isNullValue()) { 6201 DoXform = true; 6202 } else if (CC != ISD::SETCC_INVALID && 6203 RHS.getOpcode() == ISD::Constant && 6204 cast<ConstantSDNode>(RHS)->isNullValue()) { 6205 std::swap(LHS, RHS); 6206 SDValue Op0 = Slct.getOperand(0); 6207 EVT OpVT = isSlctCC ? Op0.getValueType() : 6208 Op0.getOperand(0).getValueType(); 6209 bool isInt = OpVT.isInteger(); 6210 CC = ISD::getSetCCInverse(CC, isInt); 6211 6212 if (!TLI.isCondCodeLegal(CC, OpVT)) 6213 return SDValue(); // Inverse operator isn't legal. 6214 6215 DoXform = true; 6216 InvCC = true; 6217 } 6218 6219 if (DoXform) { 6220 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 6221 if (isSlctCC) 6222 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 6223 Slct.getOperand(0), Slct.getOperand(1), CC); 6224 SDValue CCOp = Slct.getOperand(0); 6225 if (InvCC) 6226 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 6227 CCOp.getOperand(0), CCOp.getOperand(1), CC); 6228 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 6229 CCOp, OtherOp, Result); 6230 } 6231 return SDValue(); 6232} 6233 6234// AddCombineToVPADDL - For pairwise add on NEON, use the vpaddl instruction 6235// (only after legalization). 6236static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 6237 TargetLowering::DAGCombinerInfo &DCI, 6238 const ARMSubtarget *Subtarget) { 6239 6240 // Only perform optimization if after legalize, and if NEON is available. We 6241 // also expect both operands to be BUILD_VECTORs. 6242 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 6243 || N0.getOpcode() != ISD::BUILD_VECTOR 6244 || N1.getOpcode() != ISD::BUILD_VECTOR) 6245 return SDValue(); 6246 6247 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 6248 EVT VT = N->getValueType(0); 6249 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 6250 return SDValue(); 6251 6252 // Check that the vector operands are of the right form. 6253 // N0 and N1 are BUILD_VECTOR nodes, each with N EXTRACT_VECTOR_ELT 6254 // operands, where N is the number of elements in the formed vector. 6255 // Each EXTRACT_VECTOR_ELT should reference the same input vector with an 6256 // odd or even index such that we have a pairwise add pattern. 6257 6258 // Grab the vector that all EXTRACT_VECTOR_ELT nodes should be referencing. 6259 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6260 return SDValue(); 6261 SDValue Vec = N0->getOperand(0)->getOperand(0); 6262 SDNode *V = Vec.getNode(); 6263 unsigned nextIndex = 0; 6264 6265 // For each operand of the ADD (both are BUILD_VECTORs), 6266 // check that each of its operands is an EXTRACT_VECTOR_ELT with 6267 // the same vector and the appropriate index.
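  // For example (illustrative sketch only):
  //   N0 = BUILD_VECTOR (extractelt Vec, 0), (extractelt Vec, 2), ...
  //   N1 = BUILD_VECTOR (extractelt Vec, 1), (extractelt Vec, 3), ...
  // so that N0 + N1 computes a pairwise add over Vec.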
6268 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 6269 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 6270 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6271 6272 SDValue ExtVec0 = N0->getOperand(i); 6273 SDValue ExtVec1 = N1->getOperand(i); 6274 6275 // The first operand is the vector; verify it is the same. 6276 if (V != ExtVec0->getOperand(0).getNode() || 6277 V != ExtVec1->getOperand(0).getNode()) 6278 return SDValue(); 6279 6280 // The second is the constant index; verify it is correct. 6281 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 6282 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 6283 6284 // For the constants, N0 must use the even indices and N1 the odd ones. 6285 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 6286 || C1->getZExtValue() != nextIndex+1) 6287 return SDValue(); 6288 6289 // Increment index. 6290 nextIndex+=2; 6291 } else 6292 return SDValue(); 6293 } 6294 6295 // Create VPADDL node. 6296 SelectionDAG &DAG = DCI.DAG; 6297 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6298 6299 // Build operand list. 6300 SmallVector<SDValue, 8> Ops; 6301 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 6302 TLI.getPointerTy())); 6303 6304 // Input is the vector. 6305 Ops.push_back(Vec); 6306 6307 // Get the widened type (the result is truncated back to VT below). 6308 MVT widenType; 6309 unsigned numElem = VT.getVectorNumElements(); 6310 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 6311 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 6312 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 6313 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 6314 default: 6315 assert(0 && "Invalid vector element type for padd optimization."); 6316 } 6317 6318 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 6319 widenType, &Ops[0], Ops.size()); 6320 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 6321} 6322 6323/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 6324/// operands N0 and N1. This is a helper for PerformADDCombine that is 6325/// called with the default operands, and if that fails, with commuted 6326/// operands. 6327static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 6328 TargetLowering::DAGCombinerInfo &DCI, 6329 const ARMSubtarget *Subtarget) { 6330 6331 // Attempt to create vpaddl for this add. 6332 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 6333 if (Result.getNode()) 6334 return Result; 6335 6336 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c)) 6337 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 6338 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 6339 if (Result.getNode()) return Result; 6340 } 6341 return SDValue(); 6342} 6343 6344/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 6345/// 6346static SDValue PerformADDCombine(SDNode *N, 6347 TargetLowering::DAGCombinerInfo &DCI, 6348 const ARMSubtarget *Subtarget) { 6349 SDValue N0 = N->getOperand(0); 6350 SDValue N1 = N->getOperand(1); 6351 6352 // First try with the default operand order. 6353 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 6354 if (Result.getNode()) 6355 return Result; 6356 6357 // If that didn't work, try again with the operands commuted.
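  // (ADD is commutative, and combineSelectAndUse only inspects the select
  // operand it is handed, so both orders are worth a try.)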
6358 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 6359} 6360 6361/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 6362/// 6363static SDValue PerformSUBCombine(SDNode *N, 6364 TargetLowering::DAGCombinerInfo &DCI) { 6365 SDValue N0 = N->getOperand(0); 6366 SDValue N1 = N->getOperand(1); 6367 6368 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 6369 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 6370 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 6371 if (Result.getNode()) return Result; 6372 } 6373 6374 return SDValue(); 6375} 6376 6377/// PerformVMULCombine 6378/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 6379/// special multiplier accumulator forwarding. 6380/// vmul d3, d0, d2 6381/// vmla d3, d1, d2 6382/// is faster than 6383/// vadd d3, d0, d1 6384/// vmul d3, d3, d2 6385static SDValue PerformVMULCombine(SDNode *N, 6386 TargetLowering::DAGCombinerInfo &DCI, 6387 const ARMSubtarget *Subtarget) { 6388 if (!Subtarget->hasVMLxForwarding()) 6389 return SDValue(); 6390 6391 SelectionDAG &DAG = DCI.DAG; 6392 SDValue N0 = N->getOperand(0); 6393 SDValue N1 = N->getOperand(1); 6394 unsigned Opcode = N0.getOpcode(); 6395 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6396 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 6397 Opcode = N1.getOpcode(); 6398 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6399 Opcode != ISD::FADD && Opcode != ISD::FSUB) 6400 return SDValue(); 6401 std::swap(N0, N1); 6402 } 6403 6404 EVT VT = N->getValueType(0); 6405 DebugLoc DL = N->getDebugLoc(); 6406 SDValue N00 = N0->getOperand(0); 6407 SDValue N01 = N0->getOperand(1); 6408 return DAG.getNode(Opcode, DL, VT, 6409 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 6410 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 6411} 6412 6413static SDValue PerformMULCombine(SDNode *N, 6414 TargetLowering::DAGCombinerInfo &DCI, 6415 const ARMSubtarget *Subtarget) { 6416 SelectionDAG &DAG = DCI.DAG; 6417 6418 if (Subtarget->isThumb1Only()) 6419 return SDValue(); 6420 6421 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6422 return SDValue(); 6423 6424 EVT VT = N->getValueType(0); 6425 if (VT.is64BitVector() || VT.is128BitVector()) 6426 return PerformVMULCombine(N, DCI, Subtarget); 6427 if (VT != MVT::i32) 6428 return SDValue(); 6429 6430 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6431 if (!C) 6432 return SDValue(); 6433 6434 uint64_t MulAmt = C->getZExtValue(); 6435 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 6436 ShiftAmt = ShiftAmt & (32 - 1); 6437 SDValue V = N->getOperand(0); 6438 DebugLoc DL = N->getDebugLoc(); 6439 6440 SDValue Res; 6441 MulAmt >>= ShiftAmt; 6442 if (isPowerOf2_32(MulAmt - 1)) { 6443 // (mul x, 2^N + 1) => (add (shl x, N), x) 6444 Res = DAG.getNode(ISD::ADD, DL, VT, 6445 V, DAG.getNode(ISD::SHL, DL, VT, 6446 V, DAG.getConstant(Log2_32(MulAmt-1), 6447 MVT::i32))); 6448 } else if (isPowerOf2_32(MulAmt + 1)) { 6449 // (mul x, 2^N - 1) => (sub (shl x, N), x) 6450 Res = DAG.getNode(ISD::SUB, DL, VT, 6451 DAG.getNode(ISD::SHL, DL, VT, 6452 V, DAG.getConstant(Log2_32(MulAmt+1), 6453 MVT::i32)), 6454 V); 6455 } else 6456 return SDValue(); 6457 6458 if (ShiftAmt != 0) 6459 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 6460 DAG.getConstant(ShiftAmt, MVT::i32)); 6461 6462 // Do not add new nodes to DAG combiner worklist. 
6463 DCI.CombineTo(N, Res, false); 6464 return SDValue(); 6465} 6466 6467static SDValue PerformANDCombine(SDNode *N, 6468 TargetLowering::DAGCombinerInfo &DCI) { 6469 6470 // Attempt to use immediate-form VBIC 6471 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6472 DebugLoc dl = N->getDebugLoc(); 6473 EVT VT = N->getValueType(0); 6474 SelectionDAG &DAG = DCI.DAG; 6475 6476 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6477 return SDValue(); 6478 6479 APInt SplatBits, SplatUndef; 6480 unsigned SplatBitSize; 6481 bool HasAnyUndefs; 6482 if (BVN && 6483 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6484 if (SplatBitSize <= 64) { 6485 EVT VbicVT; 6486 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 6487 SplatUndef.getZExtValue(), SplatBitSize, 6488 DAG, VbicVT, VT.is128BitVector(), 6489 OtherModImm); 6490 if (Val.getNode()) { 6491 SDValue Input = 6492 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 6493 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 6494 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 6495 } 6496 } 6497 } 6498 6499 return SDValue(); 6500} 6501 6502/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 6503static SDValue PerformORCombine(SDNode *N, 6504 TargetLowering::DAGCombinerInfo &DCI, 6505 const ARMSubtarget *Subtarget) { 6506 // Attempt to use immediate-form VORR 6507 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6508 DebugLoc dl = N->getDebugLoc(); 6509 EVT VT = N->getValueType(0); 6510 SelectionDAG &DAG = DCI.DAG; 6511 6512 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6513 return SDValue(); 6514 6515 APInt SplatBits, SplatUndef; 6516 unsigned SplatBitSize; 6517 bool HasAnyUndefs; 6518 if (BVN && Subtarget->hasNEON() && 6519 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6520 if (SplatBitSize <= 64) { 6521 EVT VorrVT; 6522 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6523 SplatUndef.getZExtValue(), SplatBitSize, 6524 DAG, VorrVT, VT.is128BitVector(), 6525 OtherModImm); 6526 if (Val.getNode()) { 6527 SDValue Input = 6528 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 6529 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 6530 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 6531 } 6532 } 6533 } 6534 6535 SDValue N0 = N->getOperand(0); 6536 if (N0.getOpcode() != ISD::AND) 6537 return SDValue(); 6538 SDValue N1 = N->getOperand(1); 6539 6540 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 6541 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 6542 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 6543 APInt SplatUndef; 6544 unsigned SplatBitSize; 6545 bool HasAnyUndefs; 6546 6547 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 6548 APInt SplatBits0; 6549 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 6550 HasAnyUndefs) && !HasAnyUndefs) { 6551 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 6552 APInt SplatBits1; 6553 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 6554 HasAnyUndefs) && !HasAnyUndefs && 6555 SplatBits0 == ~SplatBits1) { 6556 // Canonicalize the vector type to make instruction selection simpler. 6557 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 6558 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 6559 N0->getOperand(1), N0->getOperand(0), 6560 N1->getOperand(0)); 6561 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 6562 } 6563 } 6564 } 6565 6566 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 6567 // reasonable. 6568 6569 // BFI is only available on V6T2+ 6570 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 6571 return SDValue(); 6572 6573 DebugLoc DL = N->getDebugLoc(); 6574 // 1) or (and A, mask), val => ARMbfi A, val, mask 6575 // iff (val & mask) == val 6576 // 6577 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6578 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 6579 // && mask == ~mask2 6580 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 6581 // && ~mask == mask2 6582 // (i.e., copy a bitfield value into another bitfield of the same width) 6583 6584 if (VT != MVT::i32) 6585 return SDValue(); 6586 6587 SDValue N00 = N0.getOperand(0); 6588 6589 // The value and the mask need to be constants so we can verify this is 6590 // actually a bitfield set. If the mask is 0xffff, we can do better 6591 // via a movt instruction, so don't use BFI in that case. 6592 SDValue MaskOp = N0.getOperand(1); 6593 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 6594 if (!MaskC) 6595 return SDValue(); 6596 unsigned Mask = MaskC->getZExtValue(); 6597 if (Mask == 0xffff) 6598 return SDValue(); 6599 SDValue Res; 6600 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 6601 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 6602 if (N1C) { 6603 unsigned Val = N1C->getZExtValue(); 6604 if ((Val & ~Mask) != Val) 6605 return SDValue(); 6606 6607 if (ARM::isBitFieldInvertedMask(Mask)) { 6608 Val >>= CountTrailingZeros_32(~Mask); 6609 6610 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 6611 DAG.getConstant(Val, MVT::i32), 6612 DAG.getConstant(Mask, MVT::i32)); 6613 6614 // Do not add new nodes to DAG combiner worklist. 6615 DCI.CombineTo(N, Res, false); 6616 return SDValue(); 6617 } 6618 } else if (N1.getOpcode() == ISD::AND) { 6619 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6620 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6621 if (!N11C) 6622 return SDValue(); 6623 unsigned Mask2 = N11C->getZExtValue(); 6624 6625 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 6626 // as is to match. 6627 if (ARM::isBitFieldInvertedMask(Mask) && 6628 (Mask == ~Mask2)) { 6629 // The pack halfword instruction works better for masks that fit it, 6630 // so use that when it's available. 6631 if (Subtarget->hasT2ExtractPack() && 6632 (Mask == 0xffff || Mask == 0xffff0000)) 6633 return SDValue(); 6634 // 2a 6635 unsigned amt = CountTrailingZeros_32(Mask2); 6636 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 6637 DAG.getConstant(amt, MVT::i32)); 6638 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 6639 DAG.getConstant(Mask, MVT::i32)); 6640 // Do not add new nodes to DAG combiner worklist. 6641 DCI.CombineTo(N, Res, false); 6642 return SDValue(); 6643 } else if (ARM::isBitFieldInvertedMask(~Mask) && 6644 (~Mask == Mask2)) { 6645 // The pack halfword instruction works better for masks that fit it, 6646 // so use that when it's available. 
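  // (Bailing out below simply leaves the OR untouched so the pack-halfword
  // patterns can match it instead, mirroring case 2a above.)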
6647 if (Subtarget->hasT2ExtractPack() && 6648 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 6649 return SDValue(); 6650 // 2b 6651 unsigned lsb = CountTrailingZeros_32(Mask); 6652 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 6653 DAG.getConstant(lsb, MVT::i32)); 6654 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 6655 DAG.getConstant(Mask2, MVT::i32)); 6656 // Do not add new nodes to DAG combiner worklist. 6657 DCI.CombineTo(N, Res, false); 6658 return SDValue(); 6659 } 6660 } 6661 6662 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 6663 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 6664 ARM::isBitFieldInvertedMask(~Mask)) { 6665 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 6666 // where lsb(mask) == #shamt and masked bits of B are known zero. 6667 SDValue ShAmt = N00.getOperand(1); 6668 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 6669 unsigned LSB = CountTrailingZeros_32(Mask); 6670 if (ShAmtC != LSB) 6671 return SDValue(); 6672 6673 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 6674 DAG.getConstant(~Mask, MVT::i32)); 6675 6676 // Do not add new nodes to DAG combiner worklist. 6677 DCI.CombineTo(N, Res, false); 6678 } 6679 6680 return SDValue(); 6681} 6682 6683/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 6684/// the bits being cleared by the AND are not demanded by the BFI. 6685static SDValue PerformBFICombine(SDNode *N, 6686 TargetLowering::DAGCombinerInfo &DCI) { 6687 SDValue N1 = N->getOperand(1); 6688 if (N1.getOpcode() == ISD::AND) { 6689 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6690 if (!N11C) 6691 return SDValue(); 6692 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 6693 unsigned LSB = CountTrailingZeros_32(~InvMask); 6694 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 6695 unsigned Mask = (1 << Width)-1; 6696 unsigned Mask2 = N11C->getZExtValue(); 6697 if ((Mask & (~Mask2)) == 0) 6698 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 6699 N->getOperand(0), N1.getOperand(0), 6700 N->getOperand(2)); 6701 } 6702 return SDValue(); 6703} 6704 6705/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 6706/// ARMISD::VMOVRRD. 6707static SDValue PerformVMOVRRDCombine(SDNode *N, 6708 TargetLowering::DAGCombinerInfo &DCI) { 6709 // vmovrrd(vmovdrr x, y) -> x,y 6710 SDValue InDouble = N->getOperand(0); 6711 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 6712 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 6713 6714 // vmovrrd(load f64) -> (load i32), (load i32) 6715 SDNode *InNode = InDouble.getNode(); 6716 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 6717 InNode->getValueType(0) == MVT::f64 && 6718 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 6719 !cast<LoadSDNode>(InNode)->isVolatile()) { 6720 // TODO: Should this be done for non-FrameIndex operands? 
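  // Rough sketch of the rewrite performed below:
  //   f64 = load [FI]  ->  i32 load [FI], i32 load [FI + 4]
  // with the two VMOVRRD results replaced by the two narrower loads.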
6721 LoadSDNode *LD = cast<LoadSDNode>(InNode); 6722 6723 SelectionDAG &DAG = DCI.DAG; 6724 DebugLoc DL = LD->getDebugLoc(); 6725 SDValue BasePtr = LD->getBasePtr(); 6726 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 6727 LD->getPointerInfo(), LD->isVolatile(), 6728 LD->isNonTemporal(), LD->getAlignment()); 6729 6730 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 6731 DAG.getConstant(4, MVT::i32)); 6732 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 6733 LD->getPointerInfo(), LD->isVolatile(), 6734 LD->isNonTemporal(), 6735 std::min(4U, LD->getAlignment() / 2)); 6736 6737 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 6738 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 6739 DCI.RemoveFromWorklist(LD); 6740 DAG.DeleteNode(LD); 6741 return Result; 6742 } 6743 6744 return SDValue(); 6745} 6746 6747/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 6748/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 6749static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 6750 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 6751 SDValue Op0 = N->getOperand(0); 6752 SDValue Op1 = N->getOperand(1); 6753 if (Op0.getOpcode() == ISD::BITCAST) 6754 Op0 = Op0.getOperand(0); 6755 if (Op1.getOpcode() == ISD::BITCAST) 6756 Op1 = Op1.getOperand(0); 6757 if (Op0.getOpcode() == ARMISD::VMOVRRD && 6758 Op0.getNode() == Op1.getNode() && 6759 Op0.getResNo() == 0 && Op1.getResNo() == 1) 6760 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 6761 N->getValueType(0), Op0.getOperand(0)); 6762 return SDValue(); 6763} 6764 6765/// PerformSTORECombine - Target-specific dag combine xforms for 6766/// ISD::STORE. 6767static SDValue PerformSTORECombine(SDNode *N, 6768 TargetLowering::DAGCombinerInfo &DCI) { 6769 // Bitcast an i64 store extracted from a vector to f64. 6770 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
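  // e.g. (sketch): store (i64 extractelt (v2i64 V), n)
  //   -> store (i64 bitcast (f64 extractelt (v2f64 bitcast V), n))
  // with the bitcasts queued for the DAGCombiner to fold away.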
6771 StoreSDNode *St = cast<StoreSDNode>(N); 6772 SDValue StVal = St->getValue(); 6773 if (!ISD::isNormalStore(St) || St->isVolatile()) 6774 return SDValue(); 6775 6776 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 6777 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 6778 SelectionDAG &DAG = DCI.DAG; 6779 DebugLoc DL = St->getDebugLoc(); 6780 SDValue BasePtr = St->getBasePtr(); 6781 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 6782 StVal.getNode()->getOperand(0), BasePtr, 6783 St->getPointerInfo(), St->isVolatile(), 6784 St->isNonTemporal(), St->getAlignment()); 6785 6786 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 6787 DAG.getConstant(4, MVT::i32)); 6788 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 6789 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 6790 St->isNonTemporal(), 6791 std::min(4U, St->getAlignment() / 2)); 6792 } 6793 6794 if (StVal.getValueType() != MVT::i64 || 6795 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6796 return SDValue(); 6797 6798 SelectionDAG &DAG = DCI.DAG; 6799 DebugLoc dl = StVal.getDebugLoc(); 6800 SDValue IntVec = StVal.getOperand(0); 6801 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6802 IntVec.getValueType().getVectorNumElements()); 6803 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 6804 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6805 Vec, StVal.getOperand(1)); 6806 dl = N->getDebugLoc(); 6807 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 6808 // Make the DAGCombiner fold the bitcasts. 6809 DCI.AddToWorklist(Vec.getNode()); 6810 DCI.AddToWorklist(ExtElt.getNode()); 6811 DCI.AddToWorklist(V.getNode()); 6812 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 6813 St->getPointerInfo(), St->isVolatile(), 6814 St->isNonTemporal(), St->getAlignment(), 6815 St->getTBAAInfo()); 6816} 6817 6818/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 6819/// are normal, non-volatile loads. If so, it is profitable to bitcast an 6820/// i64 vector to have f64 elements, since the value can then be loaded 6821/// directly into a VFP register. 6822static bool hasNormalLoadOperand(SDNode *N) { 6823 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 6824 for (unsigned i = 0; i < NumElts; ++i) { 6825 SDNode *Elt = N->getOperand(i).getNode(); 6826 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 6827 return true; 6828 } 6829 return false; 6830} 6831 6832/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 6833/// ISD::BUILD_VECTOR. 6834static SDValue PerformBUILD_VECTORCombine(SDNode *N, 6835 TargetLowering::DAGCombinerInfo &DCI){ 6836 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 6837 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 6838 // into a pair of GPRs, which is fine when the value is used as a scalar, 6839 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 6840 SelectionDAG &DAG = DCI.DAG; 6841 if (N->getNumOperands() == 2) { 6842 SDValue RV = PerformVMOVDRRCombine(N, DAG); 6843 if (RV.getNode()) 6844 return RV; 6845 } 6846 6847 // Load i64 elements as f64 values so that type legalization does not split 6848 // them up into i32 values. 
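  // Sketch for a two-element vector:
  //   build_vector (i64 A), (i64 B)
  //     -> bitcast (v2f64 build_vector (f64 bitcast A), (f64 bitcast B)) to v2i64
  // and the DAGCombiner folds each (f64 bitcast (i64 load)) into an f64 load.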
6849 EVT VT = N->getValueType(0); 6850 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 6851 return SDValue(); 6852 DebugLoc dl = N->getDebugLoc(); 6853 SmallVector<SDValue, 8> Ops; 6854 unsigned NumElts = VT.getVectorNumElements(); 6855 for (unsigned i = 0; i < NumElts; ++i) { 6856 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 6857 Ops.push_back(V); 6858 // Make the DAGCombiner fold the bitcast. 6859 DCI.AddToWorklist(V.getNode()); 6860 } 6861 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 6862 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 6863 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 6864} 6865 6866/// PerformInsertEltCombine - Target-specific dag combine xforms for 6867/// ISD::INSERT_VECTOR_ELT. 6868static SDValue PerformInsertEltCombine(SDNode *N, 6869 TargetLowering::DAGCombinerInfo &DCI) { 6870 // Bitcast an i64 load inserted into a vector to f64. 6871 // Otherwise, the i64 value will be legalized to a pair of i32 values. 6872 EVT VT = N->getValueType(0); 6873 SDNode *Elt = N->getOperand(1).getNode(); 6874 if (VT.getVectorElementType() != MVT::i64 || 6875 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 6876 return SDValue(); 6877 6878 SelectionDAG &DAG = DCI.DAG; 6879 DebugLoc dl = N->getDebugLoc(); 6880 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 6881 VT.getVectorNumElements()); 6882 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 6883 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 6884 // Make the DAGCombiner fold the bitcasts. 6885 DCI.AddToWorklist(Vec.getNode()); 6886 DCI.AddToWorklist(V.getNode()); 6887 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 6888 Vec, V, N->getOperand(2)); 6889 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 6890} 6891 6892/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 6893/// ISD::VECTOR_SHUFFLE. 6894static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 6895 // The LLVM shufflevector instruction does not require the shuffle mask 6896 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 6897 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 6898 // operands do not match the mask length, they are extended by concatenating 6899 // them with undef vectors. That is probably the right thing for other 6900 // targets, but for NEON it is better to concatenate two double-register 6901 // size vector operands into a single quad-register size vector. Do that 6902 // transformation here: 6903 // shuffle(concat(v1, undef), concat(v2, undef)) -> 6904 // shuffle(concat(v1, v2), undef) 6905 SDValue Op0 = N->getOperand(0); 6906 SDValue Op1 = N->getOperand(1); 6907 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 6908 Op1.getOpcode() != ISD::CONCAT_VECTORS || 6909 Op0.getNumOperands() != 2 || 6910 Op1.getNumOperands() != 2) 6911 return SDValue(); 6912 SDValue Concat0Op1 = Op0.getOperand(1); 6913 SDValue Concat1Op1 = Op1.getOperand(1); 6914 if (Concat0Op1.getOpcode() != ISD::UNDEF || 6915 Concat1Op1.getOpcode() != ISD::UNDEF) 6916 return SDValue(); 6917 // Skip the transformation if any of the types are illegal. 
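  // (The result type and the types of the undef half-vector operands are all
  // checked for legality below.)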
6918 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6919 EVT VT = N->getValueType(0); 6920 if (!TLI.isTypeLegal(VT) || 6921 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 6922 !TLI.isTypeLegal(Concat1Op1.getValueType())) 6923 return SDValue(); 6924 6925 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 6926 Op0.getOperand(0), Op1.getOperand(0)); 6927 // Translate the shuffle mask. 6928 SmallVector<int, 16> NewMask; 6929 unsigned NumElts = VT.getVectorNumElements(); 6930 unsigned HalfElts = NumElts/2; 6931 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 6932 for (unsigned n = 0; n < NumElts; ++n) { 6933 int MaskElt = SVN->getMaskElt(n); 6934 int NewElt = -1; 6935 if (MaskElt < (int)HalfElts) 6936 NewElt = MaskElt; 6937 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 6938 NewElt = HalfElts + MaskElt - NumElts; 6939 NewMask.push_back(NewElt); 6940 } 6941 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 6942 DAG.getUNDEF(VT), NewMask.data()); 6943} 6944 6945/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 6946/// NEON load/store intrinsics to merge base address updates. 6947static SDValue CombineBaseUpdate(SDNode *N, 6948 TargetLowering::DAGCombinerInfo &DCI) { 6949 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6950 return SDValue(); 6951 6952 SelectionDAG &DAG = DCI.DAG; 6953 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 6954 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 6955 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 6956 SDValue Addr = N->getOperand(AddrOpIdx); 6957 6958 // Search for a use of the address operand that is an increment. 6959 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 6960 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 6961 SDNode *User = *UI; 6962 if (User->getOpcode() != ISD::ADD || 6963 UI.getUse().getResNo() != Addr.getResNo()) 6964 continue; 6965 6966 // Check that the add is independent of the load/store. Otherwise, folding 6967 // it would create a cycle. 6968 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 6969 continue; 6970 6971 // Find the new opcode for the updating load/store. 
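  // Both the intrinsic forms and the ARMISD VLDnDUP nodes are recognized;
  // each maps to a *_UPD opcode that folds the base-address increment into
  // the memory operation.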
6972 bool isLoad = true; 6973 bool isLaneOp = false; 6974 unsigned NewOpc = 0; 6975 unsigned NumVecs = 0; 6976 if (isIntrinsic) { 6977 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 6978 switch (IntNo) { 6979 default: assert(0 && "unexpected intrinsic for Neon base update"); 6980 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 6981 NumVecs = 1; break; 6982 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 6983 NumVecs = 2; break; 6984 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 6985 NumVecs = 3; break; 6986 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 6987 NumVecs = 4; break; 6988 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 6989 NumVecs = 2; isLaneOp = true; break; 6990 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 6991 NumVecs = 3; isLaneOp = true; break; 6992 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 6993 NumVecs = 4; isLaneOp = true; break; 6994 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 6995 NumVecs = 1; isLoad = false; break; 6996 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 6997 NumVecs = 2; isLoad = false; break; 6998 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 6999 NumVecs = 3; isLoad = false; break; 7000 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 7001 NumVecs = 4; isLoad = false; break; 7002 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 7003 NumVecs = 2; isLoad = false; isLaneOp = true; break; 7004 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 7005 NumVecs = 3; isLoad = false; isLaneOp = true; break; 7006 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 7007 NumVecs = 4; isLoad = false; isLaneOp = true; break; 7008 } 7009 } else { 7010 isLaneOp = true; 7011 switch (N->getOpcode()) { 7012 default: assert(0 && "unexpected opcode for Neon base update"); 7013 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 7014 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 7015 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 7016 } 7017 } 7018 7019 // Find the size of memory referenced by the load/store. 7020 EVT VecTy; 7021 if (isLoad) 7022 VecTy = N->getValueType(0); 7023 else 7024 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 7025 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 7026 if (isLaneOp) 7027 NumBytes /= VecTy.getVectorNumElements(); 7028 7029 // If the increment is a constant, it must match the memory ref size. 7030 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 7031 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 7032 uint64_t IncVal = CInc->getZExtValue(); 7033 if (IncVal != NumBytes) 7034 continue; 7035 } else if (NumBytes >= 3 * 16) { 7036 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 7037 // separate instructions that make it harder to use a non-constant update. 7038 continue; 7039 } 7040 7041 // Create the new updating load/store node. 7042 EVT Tys[6]; 7043 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 7044 unsigned n; 7045 for (n = 0; n < NumResultVecs; ++n) 7046 Tys[n] = VecTy; 7047 Tys[n++] = MVT::i32; 7048 Tys[n] = MVT::Other; 7049 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 7050 SmallVector<SDValue, 8> Ops; 7051 Ops.push_back(N->getOperand(0)); // incoming chain 7052 Ops.push_back(N->getOperand(AddrOpIdx)); 7053 Ops.push_back(Inc); 7054 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 7055 Ops.push_back(N->getOperand(i)); 7056 } 7057 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 7058 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 7059 Ops.data(), Ops.size(), 7060 MemInt->getMemoryVT(), 7061 MemInt->getMemOperand()); 7062 7063 // Update the uses. 7064 std::vector<SDValue> NewResults; 7065 for (unsigned i = 0; i < NumResultVecs; ++i) { 7066 NewResults.push_back(SDValue(UpdN.getNode(), i)); 7067 } 7068 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 7069 DCI.CombineTo(N, NewResults); 7070 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 7071 7072 break; 7073 } 7074 return SDValue(); 7075} 7076 7077/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 7078/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 7079/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 7080/// return true. 7081static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 7082 SelectionDAG &DAG = DCI.DAG; 7083 EVT VT = N->getValueType(0); 7084 // vldN-dup instructions only support 64-bit vectors for N > 1. 7085 if (!VT.is64BitVector()) 7086 return false; 7087 7088 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 7089 SDNode *VLD = N->getOperand(0).getNode(); 7090 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 7091 return false; 7092 unsigned NumVecs = 0; 7093 unsigned NewOpc = 0; 7094 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 7095 if (IntNo == Intrinsic::arm_neon_vld2lane) { 7096 NumVecs = 2; 7097 NewOpc = ARMISD::VLD2DUP; 7098 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 7099 NumVecs = 3; 7100 NewOpc = ARMISD::VLD3DUP; 7101 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 7102 NumVecs = 4; 7103 NewOpc = ARMISD::VLD4DUP; 7104 } else { 7105 return false; 7106 } 7107 7108 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 7109 // numbers match the load. 7110 unsigned VLDLaneNo = 7111 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 7112 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7113 UI != UE; ++UI) { 7114 // Ignore uses of the chain result. 7115 if (UI.getUse().getResNo() == NumVecs) 7116 continue; 7117 SDNode *User = *UI; 7118 if (User->getOpcode() != ARMISD::VDUPLANE || 7119 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 7120 return false; 7121 } 7122 7123 // Create the vldN-dup node. 7124 EVT Tys[5]; 7125 unsigned n; 7126 for (n = 0; n < NumVecs; ++n) 7127 Tys[n] = VT; 7128 Tys[n] = MVT::Other; 7129 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 7130 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 7131 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 7132 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 7133 Ops, 2, VLDMemInt->getMemoryVT(), 7134 VLDMemInt->getMemOperand()); 7135 7136 // Update the uses. 
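  // Redirect each VDUPLANE user to the matching result of the new vldN-dup
  // node; uses of the original intrinsic's chain are rewired afterwards.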
7137 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7138 UI != UE; ++UI) { 7139 unsigned ResNo = UI.getUse().getResNo(); 7140 // Ignore uses of the chain result. 7141 if (ResNo == NumVecs) 7142 continue; 7143 SDNode *User = *UI; 7144 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 7145 } 7146 7147 // Now the vldN-lane intrinsic is dead except for its chain result. 7148 // Update uses of the chain. 7149 std::vector<SDValue> VLDDupResults; 7150 for (unsigned n = 0; n < NumVecs; ++n) 7151 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 7152 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 7153 DCI.CombineTo(VLD, VLDDupResults); 7154 7155 return true; 7156} 7157 7158/// PerformVDUPLANECombine - Target-specific dag combine xforms for 7159/// ARMISD::VDUPLANE. 7160static SDValue PerformVDUPLANECombine(SDNode *N, 7161 TargetLowering::DAGCombinerInfo &DCI) { 7162 SDValue Op = N->getOperand(0); 7163 7164 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 7165 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 7166 if (CombineVLDDUP(N, DCI)) 7167 return SDValue(N, 0); 7168 7169 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 7170 // redundant. Ignore bit_converts for now; element sizes are checked below. 7171 while (Op.getOpcode() == ISD::BITCAST) 7172 Op = Op.getOperand(0); 7173 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 7174 return SDValue(); 7175 7176 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 7177 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 7178 // The canonical VMOV for a zero vector uses a 32-bit element size. 7179 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7180 unsigned EltBits; 7181 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 7182 EltSize = 8; 7183 EVT VT = N->getValueType(0); 7184 if (EltSize > VT.getVectorElementType().getSizeInBits()) 7185 return SDValue(); 7186 7187 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 7188} 7189 7190// isConstVecPow2 - Return true if each vector element is a power of 2, all 7191// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 7192static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 7193{ 7194 integerPart cN; 7195 integerPart c0 = 0; 7196 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 7197 I != E; I++) { 7198 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 7199 if (!C) 7200 return false; 7201 7202 bool isExact; 7203 APFloat APF = C->getValueAPF(); 7204 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 7205 != APFloat::opOK || !isExact) 7206 return false; 7207 7208 c0 = (I == 0) ? cN : c0; 7209 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 7210 return false; 7211 } 7212 C = c0; 7213 return true; 7214} 7215 7216/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 7217/// can replace combinations of VMUL and VCVT (floating-point to integer) 7218/// when the VMUL has a constant operand that is a power of 2. 
7219/// 7220/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7221/// vmul.f32 d16, d17, d16 7222/// vcvt.s32.f32 d16, d16 7223/// becomes: 7224/// vcvt.s32.f32 d16, d16, #3 7225static SDValue PerformVCVTCombine(SDNode *N, 7226 TargetLowering::DAGCombinerInfo &DCI, 7227 const ARMSubtarget *Subtarget) { 7228 SelectionDAG &DAG = DCI.DAG; 7229 SDValue Op = N->getOperand(0); 7230 7231 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 7232 Op.getOpcode() != ISD::FMUL) 7233 return SDValue(); 7234 7235 uint64_t C; 7236 SDValue N0 = Op->getOperand(0); 7237 SDValue ConstVec = Op->getOperand(1); 7238 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 7239 7240 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7241 !isConstVecPow2(ConstVec, isSigned, C)) 7242 return SDValue(); 7243 7244 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 7245 Intrinsic::arm_neon_vcvtfp2fxu; 7246 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7247 N->getValueType(0), 7248 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 7249 DAG.getConstant(Log2_64(C), MVT::i32)); 7250} 7251 7252/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 7253/// can replace combinations of VCVT (integer to floating-point) and VDIV 7254/// when the VDIV has a constant operand that is a power of 2. 7255/// 7256/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7257/// vcvt.f32.s32 d16, d16 7258/// vdiv.f32 d16, d17, d16 7259/// becomes: 7260/// vcvt.f32.s32 d16, d16, #3 7261static SDValue PerformVDIVCombine(SDNode *N, 7262 TargetLowering::DAGCombinerInfo &DCI, 7263 const ARMSubtarget *Subtarget) { 7264 SelectionDAG &DAG = DCI.DAG; 7265 SDValue Op = N->getOperand(0); 7266 unsigned OpOpcode = Op.getNode()->getOpcode(); 7267 7268 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 7269 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 7270 return SDValue(); 7271 7272 uint64_t C; 7273 SDValue ConstVec = N->getOperand(1); 7274 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 7275 7276 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7277 !isConstVecPow2(ConstVec, isSigned, C)) 7278 return SDValue(); 7279 7280 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 7281 Intrinsic::arm_neon_vcvtfxu2fp; 7282 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7283 Op.getValueType(), 7284 DAG.getConstant(IntrinsicOpcode, MVT::i32), 7285 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 7286} 7287 7288/// Getvshiftimm - Check if this is a valid build_vector for the immediate 7289/// operand of a vector shift operation, where all the elements of the 7290/// build_vector must have the same constant integer value. 7291static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 7292 // Ignore bit_converts. 7293 while (Op.getOpcode() == ISD::BITCAST) 7294 Op = Op.getOperand(0); 7295 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7296 APInt SplatBits, SplatUndef; 7297 unsigned SplatBitSize; 7298 bool HasAnyUndefs; 7299 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 7300 HasAnyUndefs, ElementBits) || 7301 SplatBitSize > ElementBits) 7302 return false; 7303 Cnt = SplatBits.getSExtValue(); 7304 return true; 7305} 7306 7307/// isVShiftLImm - Check if this is a valid build_vector for the immediate 7308/// operand of a vector shift left operation. 
That value must be in the range: 7309/// 0 <= Value < ElementBits for a left shift; or 7310/// 0 <= Value <= ElementBits for a long left shift. 7311static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 7312 assert(VT.isVector() && "vector shift count is not a vector type"); 7313 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7314 if (! getVShiftImm(Op, ElementBits, Cnt)) 7315 return false; 7316 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 7317} 7318 7319/// isVShiftRImm - Check if this is a valid build_vector for the immediate 7320/// operand of a vector shift right operation. For a shift opcode, the value 7321/// is positive, but for an intrinsic the value count must be negative. The 7322/// absolute value must be in the range: 7323/// 1 <= |Value| <= ElementBits for a right shift; or 7324/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 7325static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 7326 int64_t &Cnt) { 7327 assert(VT.isVector() && "vector shift count is not a vector type"); 7328 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7329 if (! getVShiftImm(Op, ElementBits, Cnt)) 7330 return false; 7331 if (isIntrinsic) 7332 Cnt = -Cnt; 7333 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 7334} 7335 7336/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 7337static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 7338 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 7339 switch (IntNo) { 7340 default: 7341 // Don't do anything for most intrinsics. 7342 break; 7343 7344 // Vector shifts: check for immediate versions and lower them. 7345 // Note: This is done during DAG combining instead of DAG legalizing because 7346 // the build_vectors for 64-bit vector element shift counts are generally 7347 // not legal, and it is hard to see their values after they get legalized to 7348 // loads from a constant pool. 7349 case Intrinsic::arm_neon_vshifts: 7350 case Intrinsic::arm_neon_vshiftu: 7351 case Intrinsic::arm_neon_vshiftls: 7352 case Intrinsic::arm_neon_vshiftlu: 7353 case Intrinsic::arm_neon_vshiftn: 7354 case Intrinsic::arm_neon_vrshifts: 7355 case Intrinsic::arm_neon_vrshiftu: 7356 case Intrinsic::arm_neon_vrshiftn: 7357 case Intrinsic::arm_neon_vqshifts: 7358 case Intrinsic::arm_neon_vqshiftu: 7359 case Intrinsic::arm_neon_vqshiftsu: 7360 case Intrinsic::arm_neon_vqshiftns: 7361 case Intrinsic::arm_neon_vqshiftnu: 7362 case Intrinsic::arm_neon_vqshiftnsu: 7363 case Intrinsic::arm_neon_vqrshiftns: 7364 case Intrinsic::arm_neon_vqrshiftnu: 7365 case Intrinsic::arm_neon_vqrshiftnsu: { 7366 EVT VT = N->getOperand(1).getValueType(); 7367 int64_t Cnt; 7368 unsigned VShiftOpc = 0; 7369 7370 switch (IntNo) { 7371 case Intrinsic::arm_neon_vshifts: 7372 case Intrinsic::arm_neon_vshiftu: 7373 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 7374 VShiftOpc = ARMISD::VSHL; 7375 break; 7376 } 7377 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 7378 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
7379 ARMISD::VSHRs : ARMISD::VSHRu); 7380 break; 7381 } 7382 return SDValue(); 7383 7384 case Intrinsic::arm_neon_vshiftls: 7385 case Intrinsic::arm_neon_vshiftlu: 7386 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 7387 break; 7388 llvm_unreachable("invalid shift count for vshll intrinsic"); 7389 7390 case Intrinsic::arm_neon_vrshifts: 7391 case Intrinsic::arm_neon_vrshiftu: 7392 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 7393 break; 7394 return SDValue(); 7395 7396 case Intrinsic::arm_neon_vqshifts: 7397 case Intrinsic::arm_neon_vqshiftu: 7398 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7399 break; 7400 return SDValue(); 7401 7402 case Intrinsic::arm_neon_vqshiftsu: 7403 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7404 break; 7405 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 7406 7407 case Intrinsic::arm_neon_vshiftn: 7408 case Intrinsic::arm_neon_vrshiftn: 7409 case Intrinsic::arm_neon_vqshiftns: 7410 case Intrinsic::arm_neon_vqshiftnu: 7411 case Intrinsic::arm_neon_vqshiftnsu: 7412 case Intrinsic::arm_neon_vqrshiftns: 7413 case Intrinsic::arm_neon_vqrshiftnu: 7414 case Intrinsic::arm_neon_vqrshiftnsu: 7415 // Narrowing shifts require an immediate right shift. 7416 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 7417 break; 7418 llvm_unreachable("invalid shift count for narrowing vector shift " 7419 "intrinsic"); 7420 7421 default: 7422 llvm_unreachable("unhandled vector shift"); 7423 } 7424 7425 switch (IntNo) { 7426 case Intrinsic::arm_neon_vshifts: 7427 case Intrinsic::arm_neon_vshiftu: 7428 // Opcode already set above. 7429 break; 7430 case Intrinsic::arm_neon_vshiftls: 7431 case Intrinsic::arm_neon_vshiftlu: 7432 if (Cnt == VT.getVectorElementType().getSizeInBits()) 7433 VShiftOpc = ARMISD::VSHLLi; 7434 else 7435 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
7436 ARMISD::VSHLLs : ARMISD::VSHLLu); 7437 break; 7438 case Intrinsic::arm_neon_vshiftn: 7439 VShiftOpc = ARMISD::VSHRN; break; 7440 case Intrinsic::arm_neon_vrshifts: 7441 VShiftOpc = ARMISD::VRSHRs; break; 7442 case Intrinsic::arm_neon_vrshiftu: 7443 VShiftOpc = ARMISD::VRSHRu; break; 7444 case Intrinsic::arm_neon_vrshiftn: 7445 VShiftOpc = ARMISD::VRSHRN; break; 7446 case Intrinsic::arm_neon_vqshifts: 7447 VShiftOpc = ARMISD::VQSHLs; break; 7448 case Intrinsic::arm_neon_vqshiftu: 7449 VShiftOpc = ARMISD::VQSHLu; break; 7450 case Intrinsic::arm_neon_vqshiftsu: 7451 VShiftOpc = ARMISD::VQSHLsu; break; 7452 case Intrinsic::arm_neon_vqshiftns: 7453 VShiftOpc = ARMISD::VQSHRNs; break; 7454 case Intrinsic::arm_neon_vqshiftnu: 7455 VShiftOpc = ARMISD::VQSHRNu; break; 7456 case Intrinsic::arm_neon_vqshiftnsu: 7457 VShiftOpc = ARMISD::VQSHRNsu; break; 7458 case Intrinsic::arm_neon_vqrshiftns: 7459 VShiftOpc = ARMISD::VQRSHRNs; break; 7460 case Intrinsic::arm_neon_vqrshiftnu: 7461 VShiftOpc = ARMISD::VQRSHRNu; break; 7462 case Intrinsic::arm_neon_vqrshiftnsu: 7463 VShiftOpc = ARMISD::VQRSHRNsu; break; 7464 } 7465 7466 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7467 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 7468 } 7469 7470 case Intrinsic::arm_neon_vshiftins: { 7471 EVT VT = N->getOperand(1).getValueType(); 7472 int64_t Cnt; 7473 unsigned VShiftOpc = 0; 7474 7475 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 7476 VShiftOpc = ARMISD::VSLI; 7477 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 7478 VShiftOpc = ARMISD::VSRI; 7479 else { 7480 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 7481 } 7482 7483 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7484 N->getOperand(1), N->getOperand(2), 7485 DAG.getConstant(Cnt, MVT::i32)); 7486 } 7487 7488 case Intrinsic::arm_neon_vqrshifts: 7489 case Intrinsic::arm_neon_vqrshiftu: 7490 // No immediate versions of these to check for. 7491 break; 7492 } 7493 7494 return SDValue(); 7495} 7496 7497/// PerformShiftCombine - Checks for immediate versions of vector shifts and 7498/// lowers them. As with the vector shift intrinsics, this is done during DAG 7499/// combining instead of DAG legalizing because the build_vectors for 64-bit 7500/// vector element shift counts are generally not legal, and it is hard to see 7501/// their values after they get legalized to loads from a constant pool. 7502static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 7503 const ARMSubtarget *ST) { 7504 EVT VT = N->getValueType(0); 7505 7506 // Nothing to be done for scalar shifts. 7507 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7508 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 7509 return SDValue(); 7510 7511 assert(ST->hasNEON() && "unexpected vector shift"); 7512 int64_t Cnt; 7513 7514 switch (N->getOpcode()) { 7515 default: llvm_unreachable("unexpected shift opcode"); 7516 7517 case ISD::SHL: 7518 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 7519 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 7520 DAG.getConstant(Cnt, MVT::i32)); 7521 break; 7522 7523 case ISD::SRA: 7524 case ISD::SRL: 7525 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 7526 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
7527 ARMISD::VSHRs : ARMISD::VSHRu); 7528 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 7529 DAG.getConstant(Cnt, MVT::i32)); 7530 } 7531 } 7532 return SDValue(); 7533} 7534 7535/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 7536/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 7537static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 7538 const ARMSubtarget *ST) { 7539 SDValue N0 = N->getOperand(0); 7540 7541 // Check for sign- and zero-extensions of vector extract operations of 8- 7542 // and 16-bit vector elements. NEON supports these directly. They are 7543 // handled during DAG combining because type legalization will promote them 7544 // to 32-bit types and it is messy to recognize the operations after that. 7545 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7546 SDValue Vec = N0.getOperand(0); 7547 SDValue Lane = N0.getOperand(1); 7548 EVT VT = N->getValueType(0); 7549 EVT EltVT = N0.getValueType(); 7550 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7551 7552 if (VT == MVT::i32 && 7553 (EltVT == MVT::i8 || EltVT == MVT::i16) && 7554 TLI.isTypeLegal(Vec.getValueType()) && 7555 isa<ConstantSDNode>(Lane)) { 7556 7557 unsigned Opc = 0; 7558 switch (N->getOpcode()) { 7559 default: llvm_unreachable("unexpected opcode"); 7560 case ISD::SIGN_EXTEND: 7561 Opc = ARMISD::VGETLANEs; 7562 break; 7563 case ISD::ZERO_EXTEND: 7564 case ISD::ANY_EXTEND: 7565 Opc = ARMISD::VGETLANEu; 7566 break; 7567 } 7568 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 7569 } 7570 } 7571 7572 return SDValue(); 7573} 7574 7575/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 7576/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 7577static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 7578 const ARMSubtarget *ST) { 7579 // If the target supports NEON, try to use vmax/vmin instructions for f32 7580 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 7581 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 7582 // a NaN; only do the transformation when it matches that behavior. 7583 7584 // For now only do this when using NEON for FP operations; if using VFP, it 7585 // is not obvious that the benefit outweighs the cost of switching to the 7586 // NEON pipeline. 7587 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 7588 N->getValueType(0) != MVT::f32) 7589 return SDValue(); 7590 7591 SDValue CondLHS = N->getOperand(0); 7592 SDValue CondRHS = N->getOperand(1); 7593 SDValue LHS = N->getOperand(2); 7594 SDValue RHS = N->getOperand(3); 7595 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 7596 7597 unsigned Opcode = 0; 7598 bool IsReversed; 7599 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 7600 IsReversed = false; // x CC y ? x : y 7601 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 7602 IsReversed = true ; // x CC y ? y : x 7603 } else { 7604 return SDValue(); 7605 } 7606 7607 bool IsUnordered; 7608 switch (CC) { 7609 default: break; 7610 case ISD::SETOLT: 7611 case ISD::SETOLE: 7612 case ISD::SETLT: 7613 case ISD::SETLE: 7614 case ISD::SETULT: 7615 case ISD::SETULE: 7616 // If LHS is NaN, an ordered comparison will be false and the result will 7617 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 7618 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
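    // For example, with "x < y ? x : y" and x = NaN, the ordered compare is
    // false and the select yields y, but vmin(NaN, y) would yield NaN, so the
    // fold is only safe when the operand that could poison the result is
    // known not to be NaN.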
7619 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 7620 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7621 break; 7622 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 7623 // will return -0, so vmin can only be used for unsafe math or if one of 7624 // the operands is known to be nonzero. 7625 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 7626 !UnsafeFPMath && 7627 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7628 break; 7629 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 7630 break; 7631 7632 case ISD::SETOGT: 7633 case ISD::SETOGE: 7634 case ISD::SETGT: 7635 case ISD::SETGE: 7636 case ISD::SETUGT: 7637 case ISD::SETUGE: 7638 // If LHS is NaN, an ordered comparison will be false and the result will 7639 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 7640 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 7641 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 7642 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7643 break; 7644 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 7645 // will return +0, so vmax can only be used for unsafe math or if one of 7646 // the operands is known to be nonzero. 7647 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 7648 !UnsafeFPMath && 7649 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7650 break; 7651 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 7652 break; 7653 } 7654 7655 if (!Opcode) 7656 return SDValue(); 7657 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 7658} 7659 7660/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 7661SDValue 7662ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 7663 SDValue Cmp = N->getOperand(4); 7664 if (Cmp.getOpcode() != ARMISD::CMPZ) 7665 // Only looking at EQ and NE cases. 7666 return SDValue(); 7667 7668 EVT VT = N->getValueType(0); 7669 DebugLoc dl = N->getDebugLoc(); 7670 SDValue LHS = Cmp.getOperand(0); 7671 SDValue RHS = Cmp.getOperand(1); 7672 SDValue FalseVal = N->getOperand(0); 7673 SDValue TrueVal = N->getOperand(1); 7674 SDValue ARMcc = N->getOperand(2); 7675 ARMCC::CondCodes CC = 7676 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 7677 7678 // Simplify 7679 // mov r1, r0 7680 // cmp r1, x 7681 // mov r0, y 7682 // moveq r0, x 7683 // to 7684 // cmp r0, x 7685 // movne r0, y 7686 // 7687 // mov r1, r0 7688 // cmp r1, x 7689 // mov r0, x 7690 // movne r0, y 7691 // to 7692 // cmp r0, x 7693 // movne r0, y 7694 /// FIXME: Turn this into a target neutral optimization? 7695 SDValue Res; 7696 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 7697 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 7698 N->getOperand(3), Cmp); 7699 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 7700 SDValue ARMcc; 7701 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 7702 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 7703 N->getOperand(3), NewCmp); 7704 } 7705 7706 if (Res.getNode()) { 7707 APInt KnownZero, KnownOne; 7708 APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()); 7709 DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne); 7710 // Capture demanded bits information that would be otherwise lost. 
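    // For example, KnownZero == 0xfffffffe means every bit other than bit 0 is
    // known to be zero, i.e. the CMOV result behaves like a zero-extended i1;
    // the AssertZext nodes below record that fact so it is not lost.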
7711 if (KnownZero == 0xfffffffe) 7712 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7713 DAG.getValueType(MVT::i1)); 7714 else if (KnownZero == 0xffffff00) 7715 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7716 DAG.getValueType(MVT::i8)); 7717 else if (KnownZero == 0xffff0000) 7718 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 7719 DAG.getValueType(MVT::i16)); 7720 } 7721 7722 return Res; 7723} 7724 7725SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 7726 DAGCombinerInfo &DCI) const { 7727 switch (N->getOpcode()) { 7728 default: break; 7729 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 7730 case ISD::SUB: return PerformSUBCombine(N, DCI); 7731 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 7732 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 7733 case ISD::AND: return PerformANDCombine(N, DCI); 7734 case ARMISD::BFI: return PerformBFICombine(N, DCI); 7735 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 7736 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 7737 case ISD::STORE: return PerformSTORECombine(N, DCI); 7738 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 7739 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 7740 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 7741 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 7742 case ISD::FP_TO_SINT: 7743 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 7744 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 7745 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 7746 case ISD::SHL: 7747 case ISD::SRA: 7748 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 7749 case ISD::SIGN_EXTEND: 7750 case ISD::ZERO_EXTEND: 7751 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 7752 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 7753 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 7754 case ARMISD::VLD2DUP: 7755 case ARMISD::VLD3DUP: 7756 case ARMISD::VLD4DUP: 7757 return CombineBaseUpdate(N, DCI); 7758 case ISD::INTRINSIC_VOID: 7759 case ISD::INTRINSIC_W_CHAIN: 7760 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 7761 case Intrinsic::arm_neon_vld1: 7762 case Intrinsic::arm_neon_vld2: 7763 case Intrinsic::arm_neon_vld3: 7764 case Intrinsic::arm_neon_vld4: 7765 case Intrinsic::arm_neon_vld2lane: 7766 case Intrinsic::arm_neon_vld3lane: 7767 case Intrinsic::arm_neon_vld4lane: 7768 case Intrinsic::arm_neon_vst1: 7769 case Intrinsic::arm_neon_vst2: 7770 case Intrinsic::arm_neon_vst3: 7771 case Intrinsic::arm_neon_vst4: 7772 case Intrinsic::arm_neon_vst2lane: 7773 case Intrinsic::arm_neon_vst3lane: 7774 case Intrinsic::arm_neon_vst4lane: 7775 return CombineBaseUpdate(N, DCI); 7776 default: break; 7777 } 7778 break; 7779 } 7780 return SDValue(); 7781} 7782 7783bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 7784 EVT VT) const { 7785 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 7786} 7787 7788bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 7789 if (!Subtarget->allowsUnalignedMem()) 7790 return false; 7791 7792 switch (VT.getSimpleVT().SimpleTy) { 7793 default: 7794 return false; 7795 case MVT::i8: 7796 case MVT::i16: 7797 case MVT::i32: 7798 return true; 7799 // FIXME: VLD1 etc with standard alignment is legal. 
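    // At the moment vector types fall through to the default case above and
    // report false, so unaligned vector accesses are rejected even when a
    // VLD1/VST1 with the natural element alignment could handle them.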
7800 } 7801} 7802 7803static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 7804 if (V < 0) 7805 return false; 7806 7807 unsigned Scale = 1; 7808 switch (VT.getSimpleVT().SimpleTy) { 7809 default: return false; 7810 case MVT::i1: 7811 case MVT::i8: 7812 // Scale == 1; 7813 break; 7814 case MVT::i16: 7815 // Scale == 2; 7816 Scale = 2; 7817 break; 7818 case MVT::i32: 7819 // Scale == 4; 7820 Scale = 4; 7821 break; 7822 } 7823 7824 if ((V & (Scale - 1)) != 0) 7825 return false; 7826 V /= Scale; 7827 return V == (V & ((1LL << 5) - 1)); 7828} 7829 7830static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 7831 const ARMSubtarget *Subtarget) { 7832 bool isNeg = false; 7833 if (V < 0) { 7834 isNeg = true; 7835 V = - V; 7836 } 7837 7838 switch (VT.getSimpleVT().SimpleTy) { 7839 default: return false; 7840 case MVT::i1: 7841 case MVT::i8: 7842 case MVT::i16: 7843 case MVT::i32: 7844 // + imm12 or - imm8 7845 if (isNeg) 7846 return V == (V & ((1LL << 8) - 1)); 7847 return V == (V & ((1LL << 12) - 1)); 7848 case MVT::f32: 7849 case MVT::f64: 7850 // Same as ARM mode. FIXME: NEON? 7851 if (!Subtarget->hasVFP2()) 7852 return false; 7853 if ((V & 3) != 0) 7854 return false; 7855 V >>= 2; 7856 return V == (V & ((1LL << 8) - 1)); 7857 } 7858} 7859 7860/// isLegalAddressImmediate - Return true if the integer value can be used 7861/// as the offset of the target addressing mode for load / store of the 7862/// given type. 7863static bool isLegalAddressImmediate(int64_t V, EVT VT, 7864 const ARMSubtarget *Subtarget) { 7865 if (V == 0) 7866 return true; 7867 7868 if (!VT.isSimple()) 7869 return false; 7870 7871 if (Subtarget->isThumb1Only()) 7872 return isLegalT1AddressImmediate(V, VT); 7873 else if (Subtarget->isThumb2()) 7874 return isLegalT2AddressImmediate(V, VT, Subtarget); 7875 7876 // ARM mode. 7877 if (V < 0) 7878 V = - V; 7879 switch (VT.getSimpleVT().SimpleTy) { 7880 default: return false; 7881 case MVT::i1: 7882 case MVT::i8: 7883 case MVT::i32: 7884 // +- imm12 7885 return V == (V & ((1LL << 12) - 1)); 7886 case MVT::i16: 7887 // +- imm8 7888 return V == (V & ((1LL << 8) - 1)); 7889 case MVT::f32: 7890 case MVT::f64: 7891 if (!Subtarget->hasVFP2()) // FIXME: NEON? 7892 return false; 7893 if ((V & 3) != 0) 7894 return false; 7895 V >>= 2; 7896 return V == (V & ((1LL << 8) - 1)); 7897 } 7898} 7899 7900bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 7901 EVT VT) const { 7902 int Scale = AM.Scale; 7903 if (Scale < 0) 7904 return false; 7905 7906 switch (VT.getSimpleVT().SimpleTy) { 7907 default: return false; 7908 case MVT::i1: 7909 case MVT::i8: 7910 case MVT::i16: 7911 case MVT::i32: 7912 if (Scale == 1) 7913 return true; 7914 // r + r << imm 7915 Scale = Scale & ~1; 7916 return Scale == 2 || Scale == 4 || Scale == 8; 7917 case MVT::i64: 7918 // r + r 7919 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 7920 return true; 7921 return false; 7922 case MVT::isVoid: 7923 // Note, we allow "void" uses (basically, uses that aren't loads or 7924 // stores), because arm allows folding a scale into many arithmetic 7925 // operations. This should be made more precise and revisited later. 7926 7927 // Allow r << imm, but the imm has to be a multiple of two. 7928 if (Scale & 1) return false; 7929 return isPowerOf2_32(Scale); 7930 } 7931} 7932 7933/// isLegalAddressingMode - Return true if the addressing mode represented 7934/// by AM is legal for this target, for a load/store of the specified type. 
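/// For example, a plain base-plus-immediate address such as [r0, #8] is
/// accepted for an i32 access, whereas a scaled index register combined with
/// a nonzero immediate offset (r0 + r1*4 + 8) is rejected below, since ARM has
/// no r + r*scale + imm addressing mode.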
7935bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 7936 Type *Ty) const { 7937 EVT VT = getValueType(Ty, true); 7938 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 7939 return false; 7940 7941 // Can never fold addr of global into load/store. 7942 if (AM.BaseGV) 7943 return false; 7944 7945 switch (AM.Scale) { 7946 case 0: // no scale reg, must be "r+i" or "r", or "i". 7947 break; 7948 case 1: 7949 if (Subtarget->isThumb1Only()) 7950 return false; 7951 // FALL THROUGH. 7952 default: 7953 // ARM doesn't support any R+R*scale+imm addr modes. 7954 if (AM.BaseOffs) 7955 return false; 7956 7957 if (!VT.isSimple()) 7958 return false; 7959 7960 if (Subtarget->isThumb2()) 7961 return isLegalT2ScaledAddressingMode(AM, VT); 7962 7963 int Scale = AM.Scale; 7964 switch (VT.getSimpleVT().SimpleTy) { 7965 default: return false; 7966 case MVT::i1: 7967 case MVT::i8: 7968 case MVT::i32: 7969 if (Scale < 0) Scale = -Scale; 7970 if (Scale == 1) 7971 return true; 7972 // r + r << imm 7973 return isPowerOf2_32(Scale & ~1); 7974 case MVT::i16: 7975 case MVT::i64: 7976 // r + r 7977 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 7978 return true; 7979 return false; 7980 7981 case MVT::isVoid: 7982 // Note, we allow "void" uses (basically, uses that aren't loads or 7983 // stores), because arm allows folding a scale into many arithmetic 7984 // operations. This should be made more precise and revisited later. 7985 7986 // Allow r << imm, but the imm has to be a multiple of two. 7987 if (Scale & 1) return false; 7988 return isPowerOf2_32(Scale); 7989 } 7990 break; 7991 } 7992 return true; 7993} 7994 7995/// isLegalICmpImmediate - Return true if the specified immediate is legal 7996/// icmp immediate, that is the target has icmp instructions which can compare 7997/// a register against the immediate without having to materialize the 7998/// immediate into a register. 7999bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 8000 if (!Subtarget->isThumb()) 8001 return ARM_AM::getSOImmVal(Imm) != -1; 8002 if (Subtarget->isThumb2()) 8003 return ARM_AM::getT2SOImmVal(Imm) != -1; 8004 return Imm >= 0 && Imm <= 255; 8005} 8006 8007/// isLegalAddImmediate - Return true if the specified immediate is legal 8008/// add immediate, that is the target has add instructions which can add 8009/// a register with the immediate without having to materialize the 8010/// immediate into a register. 
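/// For example, 0x1F00 (0x1F rotated into position) is encodable as an ARM
/// so_imm and is therefore a legal add immediate, while 0x12345 is not and
/// would first have to be materialized into a register.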
8011bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 8012 return ARM_AM::getSOImmVal(Imm) != -1; 8013} 8014 8015static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 8016 bool isSEXTLoad, SDValue &Base, 8017 SDValue &Offset, bool &isInc, 8018 SelectionDAG &DAG) { 8019 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8020 return false; 8021 8022 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 8023 // AddressingMode 3 8024 Base = Ptr->getOperand(0); 8025 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8026 int RHSC = (int)RHS->getZExtValue(); 8027 if (RHSC < 0 && RHSC > -256) { 8028 assert(Ptr->getOpcode() == ISD::ADD); 8029 isInc = false; 8030 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8031 return true; 8032 } 8033 } 8034 isInc = (Ptr->getOpcode() == ISD::ADD); 8035 Offset = Ptr->getOperand(1); 8036 return true; 8037 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 8038 // AddressingMode 2 8039 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8040 int RHSC = (int)RHS->getZExtValue(); 8041 if (RHSC < 0 && RHSC > -0x1000) { 8042 assert(Ptr->getOpcode() == ISD::ADD); 8043 isInc = false; 8044 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8045 Base = Ptr->getOperand(0); 8046 return true; 8047 } 8048 } 8049 8050 if (Ptr->getOpcode() == ISD::ADD) { 8051 isInc = true; 8052 ARM_AM::ShiftOpc ShOpcVal= 8053 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 8054 if (ShOpcVal != ARM_AM::no_shift) { 8055 Base = Ptr->getOperand(1); 8056 Offset = Ptr->getOperand(0); 8057 } else { 8058 Base = Ptr->getOperand(0); 8059 Offset = Ptr->getOperand(1); 8060 } 8061 return true; 8062 } 8063 8064 isInc = (Ptr->getOpcode() == ISD::ADD); 8065 Base = Ptr->getOperand(0); 8066 Offset = Ptr->getOperand(1); 8067 return true; 8068 } 8069 8070 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 8071 return false; 8072} 8073 8074static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 8075 bool isSEXTLoad, SDValue &Base, 8076 SDValue &Offset, bool &isInc, 8077 SelectionDAG &DAG) { 8078 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8079 return false; 8080 8081 Base = Ptr->getOperand(0); 8082 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8083 int RHSC = (int)RHS->getZExtValue(); 8084 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 8085 assert(Ptr->getOpcode() == ISD::ADD); 8086 isInc = false; 8087 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8088 return true; 8089 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 8090 isInc = Ptr->getOpcode() == ISD::ADD; 8091 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 8092 return true; 8093 } 8094 } 8095 8096 return false; 8097} 8098 8099/// getPreIndexedAddressParts - returns true by value, base pointer and 8100/// offset pointer and addressing mode by reference if the node's address 8101/// can be legally represented as pre-indexed load / store address. 
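/// For example, a load from "p + 4" whose address computation also updates p
/// can be selected as a single pre-indexed load such as "ldr r0, [r1, #4]!".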
8102bool 8103ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 8104 SDValue &Offset, 8105 ISD::MemIndexedMode &AM, 8106 SelectionDAG &DAG) const { 8107 if (Subtarget->isThumb1Only()) 8108 return false; 8109 8110 EVT VT; 8111 SDValue Ptr; 8112 bool isSEXTLoad = false; 8113 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8114 Ptr = LD->getBasePtr(); 8115 VT = LD->getMemoryVT(); 8116 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8117 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8118 Ptr = ST->getBasePtr(); 8119 VT = ST->getMemoryVT(); 8120 } else 8121 return false; 8122 8123 bool isInc; 8124 bool isLegal = false; 8125 if (Subtarget->isThumb2()) 8126 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8127 Offset, isInc, DAG); 8128 else 8129 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8130 Offset, isInc, DAG); 8131 if (!isLegal) 8132 return false; 8133 8134 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 8135 return true; 8136} 8137 8138/// getPostIndexedAddressParts - returns true by value, base pointer and 8139/// offset pointer and addressing mode by reference if this node can be 8140/// combined with a load / store to form a post-indexed load / store. 8141bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 8142 SDValue &Base, 8143 SDValue &Offset, 8144 ISD::MemIndexedMode &AM, 8145 SelectionDAG &DAG) const { 8146 if (Subtarget->isThumb1Only()) 8147 return false; 8148 8149 EVT VT; 8150 SDValue Ptr; 8151 bool isSEXTLoad = false; 8152 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8153 VT = LD->getMemoryVT(); 8154 Ptr = LD->getBasePtr(); 8155 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8156 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8157 VT = ST->getMemoryVT(); 8158 Ptr = ST->getBasePtr(); 8159 } else 8160 return false; 8161 8162 bool isInc; 8163 bool isLegal = false; 8164 if (Subtarget->isThumb2()) 8165 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8166 isInc, DAG); 8167 else 8168 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8169 isInc, DAG); 8170 if (!isLegal) 8171 return false; 8172 8173 if (Ptr != Base) { 8174 // Swap base ptr and offset to catch more post-index load / store when 8175 // it's legal. In Thumb2 mode, offset must be an immediate. 8176 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 8177 !Subtarget->isThumb2()) 8178 std::swap(Base, Offset); 8179 8180 // Post-indexed load / store update the base pointer. 8181 if (Ptr != Base) 8182 return false; 8183 } 8184 8185 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 8186 return true; 8187} 8188 8189void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 8190 const APInt &Mask, 8191 APInt &KnownZero, 8192 APInt &KnownOne, 8193 const SelectionDAG &DAG, 8194 unsigned Depth) const { 8195 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 8196 switch (Op.getOpcode()) { 8197 default: break; 8198 case ARMISD::CMOV: { 8199 // Bits are known zero/one if known on the LHS and RHS. 
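    // A CMOV yields either operand 0 or operand 1, so a bit of the result is
    // known only if it is known to have the same value in both inputs; the
    // intersection computed below captures exactly that.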
8200 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 8201 if (KnownZero == 0 && KnownOne == 0) return; 8202 8203 APInt KnownZeroRHS, KnownOneRHS; 8204 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 8205 KnownZeroRHS, KnownOneRHS, Depth+1); 8206 KnownZero &= KnownZeroRHS; 8207 KnownOne &= KnownOneRHS; 8208 return; 8209 } 8210 } 8211} 8212 8213//===----------------------------------------------------------------------===// 8214// ARM Inline Assembly Support 8215//===----------------------------------------------------------------------===// 8216 8217bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 8218 // Looking for "rev" which is V6+. 8219 if (!Subtarget->hasV6Ops()) 8220 return false; 8221 8222 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 8223 std::string AsmStr = IA->getAsmString(); 8224 SmallVector<StringRef, 4> AsmPieces; 8225 SplitString(AsmStr, AsmPieces, ";\n"); 8226 8227 switch (AsmPieces.size()) { 8228 default: return false; 8229 case 1: 8230 AsmStr = AsmPieces[0]; 8231 AsmPieces.clear(); 8232 SplitString(AsmStr, AsmPieces, " \t,"); 8233 8234 // rev $0, $1 8235 if (AsmPieces.size() == 3 && 8236 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 8237 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 8238 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 8239 if (Ty && Ty->getBitWidth() == 32) 8240 return IntrinsicLowering::LowerToByteSwap(CI); 8241 } 8242 break; 8243 } 8244 8245 return false; 8246} 8247 8248/// getConstraintType - Given a constraint letter, return the type of 8249/// constraint it is for this target. 8250ARMTargetLowering::ConstraintType 8251ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 8252 if (Constraint.size() == 1) { 8253 switch (Constraint[0]) { 8254 default: break; 8255 case 'l': return C_RegisterClass; 8256 case 'w': return C_RegisterClass; 8257 case 'h': return C_RegisterClass; 8258 case 'x': return C_RegisterClass; 8259 case 't': return C_RegisterClass; 8260 case 'j': return C_Other; // Constant for movw. 8261 // An address with a single base register. Due to the way we 8262 // currently handle addresses it is the same as an 'r' memory constraint. 8263 case 'Q': return C_Memory; 8264 } 8265 } else if (Constraint.size() == 2) { 8266 switch (Constraint[0]) { 8267 default: break; 8268 // All 'U+' constraints are addresses. 8269 case 'U': return C_Memory; 8270 } 8271 } 8272 return TargetLowering::getConstraintType(Constraint); 8273} 8274 8275/// Examine constraint type and operand type and determine a weight value. 8276/// This object must already have been set up with the operand type 8277/// and the current alternative constraint selected. 8278TargetLowering::ConstraintWeight 8279ARMTargetLowering::getSingleConstraintMatchWeight( 8280 AsmOperandInfo &info, const char *constraint) const { 8281 ConstraintWeight weight = CW_Invalid; 8282 Value *CallOperandVal = info.CallOperandVal; 8283 // If we don't have a value, we can't do a match, 8284 // but allow it at the lowest weight. 8285 if (CallOperandVal == NULL) 8286 return CW_Default; 8287 Type *type = CallOperandVal->getType(); 8288 // Look at the constraint type. 
8289 switch (*constraint) { 8290 default: 8291 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 8292 break; 8293 case 'l': 8294 if (type->isIntegerTy()) { 8295 if (Subtarget->isThumb()) 8296 weight = CW_SpecificReg; 8297 else 8298 weight = CW_Register; 8299 } 8300 break; 8301 case 'w': 8302 if (type->isFloatingPointTy()) 8303 weight = CW_Register; 8304 break; 8305 } 8306 return weight; 8307} 8308 8309typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 8310RCPair 8311ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 8312 EVT VT) const { 8313 if (Constraint.size() == 1) { 8314 // GCC ARM Constraint Letters 8315 switch (Constraint[0]) { 8316 case 'l': // Low regs or general regs. 8317 if (Subtarget->isThumb()) 8318 return RCPair(0U, ARM::tGPRRegisterClass); 8319 else 8320 return RCPair(0U, ARM::GPRRegisterClass); 8321 case 'h': // High regs or no regs. 8322 if (Subtarget->isThumb()) 8323 return RCPair(0U, ARM::hGPRRegisterClass); 8324 break; 8325 case 'r': 8326 return RCPair(0U, ARM::GPRRegisterClass); 8327 case 'w': 8328 if (VT == MVT::f32) 8329 return RCPair(0U, ARM::SPRRegisterClass); 8330 if (VT.getSizeInBits() == 64) 8331 return RCPair(0U, ARM::DPRRegisterClass); 8332 if (VT.getSizeInBits() == 128) 8333 return RCPair(0U, ARM::QPRRegisterClass); 8334 break; 8335 case 'x': 8336 if (VT == MVT::f32) 8337 return RCPair(0U, ARM::SPR_8RegisterClass); 8338 if (VT.getSizeInBits() == 64) 8339 return RCPair(0U, ARM::DPR_8RegisterClass); 8340 if (VT.getSizeInBits() == 128) 8341 return RCPair(0U, ARM::QPR_8RegisterClass); 8342 break; 8343 case 't': 8344 if (VT == MVT::f32) 8345 return RCPair(0U, ARM::SPRRegisterClass); 8346 break; 8347 } 8348 } 8349 if (StringRef("{cc}").equals_lower(Constraint)) 8350 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 8351 8352 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8353} 8354 8355/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8356/// vector. If it is invalid, don't add anything to Ops. 8357void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8358 std::string &Constraint, 8359 std::vector<SDValue>&Ops, 8360 SelectionDAG &DAG) const { 8361 SDValue Result(0, 0); 8362 8363 // Currently only support length 1 constraints. 8364 if (Constraint.length() != 1) return; 8365 8366 char ConstraintLetter = Constraint[0]; 8367 switch (ConstraintLetter) { 8368 default: break; 8369 case 'j': 8370 case 'I': case 'J': case 'K': case 'L': 8371 case 'M': case 'N': case 'O': 8372 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 8373 if (!C) 8374 return; 8375 8376 int64_t CVal64 = C->getSExtValue(); 8377 int CVal = (int) CVal64; 8378 // None of these constraints allow values larger than 32 bits. Check 8379 // that the value fits in an int. 8380 if (CVal != CVal64) 8381 return; 8382 8383 switch (ConstraintLetter) { 8384 case 'j': 8385 // Constant suitable for movw, must be between 0 and 8386 // 65535. 8387 if (Subtarget->hasV6T2Ops()) 8388 if (CVal >= 0 && CVal <= 65535) 8389 break; 8390 return; 8391 case 'I': 8392 if (Subtarget->isThumb1Only()) { 8393 // This must be a constant between 0 and 255, for ADD 8394 // immediates. 8395 if (CVal >= 0 && CVal <= 255) 8396 break; 8397 } else if (Subtarget->isThumb2()) { 8398 // A constant that can be used as an immediate value in a 8399 // data-processing instruction. 
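        // (Roughly: an 8-bit value rotated into position, or one of the
        // byte-replicated patterns 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY.)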
8400 if (ARM_AM::getT2SOImmVal(CVal) != -1) 8401 break; 8402 } else { 8403 // A constant that can be used as an immediate value in a 8404 // data-processing instruction. 8405 if (ARM_AM::getSOImmVal(CVal) != -1) 8406 break; 8407 } 8408 return; 8409 8410 case 'J': 8411 if (Subtarget->isThumb()) { // FIXME thumb2 8412 // This must be a constant between -255 and -1, for negated ADD 8413 // immediates. This can be used in GCC with an "n" modifier that 8414 // prints the negated value, for use with SUB instructions. It is 8415 // not useful otherwise but is implemented for compatibility. 8416 if (CVal >= -255 && CVal <= -1) 8417 break; 8418 } else { 8419 // This must be a constant between -4095 and 4095. It is not clear 8420 // what this constraint is intended for. Implemented for 8421 // compatibility with GCC. 8422 if (CVal >= -4095 && CVal <= 4095) 8423 break; 8424 } 8425 return; 8426 8427 case 'K': 8428 if (Subtarget->isThumb1Only()) { 8429 // A 32-bit value where only one byte has a nonzero value. Exclude 8430 // zero to match GCC. This constraint is used by GCC internally for 8431 // constants that can be loaded with a move/shift combination. 8432 // It is not useful otherwise but is implemented for compatibility. 8433 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 8434 break; 8435 } else if (Subtarget->isThumb2()) { 8436 // A constant whose bitwise inverse can be used as an immediate 8437 // value in a data-processing instruction. This can be used in GCC 8438 // with a "B" modifier that prints the inverted value, for use with 8439 // BIC and MVN instructions. It is not useful otherwise but is 8440 // implemented for compatibility. 8441 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 8442 break; 8443 } else { 8444 // A constant whose bitwise inverse can be used as an immediate 8445 // value in a data-processing instruction. This can be used in GCC 8446 // with a "B" modifier that prints the inverted value, for use with 8447 // BIC and MVN instructions. It is not useful otherwise but is 8448 // implemented for compatibility. 8449 if (ARM_AM::getSOImmVal(~CVal) != -1) 8450 break; 8451 } 8452 return; 8453 8454 case 'L': 8455 if (Subtarget->isThumb1Only()) { 8456 // This must be a constant between -7 and 7, 8457 // for 3-operand ADD/SUB immediate instructions. 8458 if (CVal >= -7 && CVal < 7) 8459 break; 8460 } else if (Subtarget->isThumb2()) { 8461 // A constant whose negation can be used as an immediate value in a 8462 // data-processing instruction. This can be used in GCC with an "n" 8463 // modifier that prints the negated value, for use with SUB 8464 // instructions. It is not useful otherwise but is implemented for 8465 // compatibility. 8466 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 8467 break; 8468 } else { 8469 // A constant whose negation can be used as an immediate value in a 8470 // data-processing instruction. This can be used in GCC with an "n" 8471 // modifier that prints the negated value, for use with SUB 8472 // instructions. It is not useful otherwise but is implemented for 8473 // compatibility. 8474 if (ARM_AM::getSOImmVal(-CVal) != -1) 8475 break; 8476 } 8477 return; 8478 8479 case 'M': 8480 if (Subtarget->isThumb()) { // FIXME thumb2 8481 // This must be a multiple of 4 between 0 and 1020, for 8482 // ADD sp + immediate. 8483 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 8484 break; 8485 } else { 8486 // A power of two or a constant between 0 and 32. 
This is used in 8487 // GCC for the shift amount on shifted register operands, but it is 8488 // useful in general for any shift amounts. 8489 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 8490 break; 8491 } 8492 return; 8493 8494 case 'N': 8495 if (Subtarget->isThumb()) { // FIXME thumb2 8496 // This must be a constant between 0 and 31, for shift amounts. 8497 if (CVal >= 0 && CVal <= 31) 8498 break; 8499 } 8500 return; 8501 8502 case 'O': 8503 if (Subtarget->isThumb()) { // FIXME thumb2 8504 // This must be a multiple of 4 between -508 and 508, for 8505 // ADD/SUB sp = sp + immediate. 8506 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 8507 break; 8508 } 8509 return; 8510 } 8511 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 8512 break; 8513 } 8514 8515 if (Result.getNode()) { 8516 Ops.push_back(Result); 8517 return; 8518 } 8519 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8520} 8521 8522bool 8523ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 8524 // The ARM target isn't yet aware of offsets. 8525 return false; 8526} 8527 8528bool ARM::isBitFieldInvertedMask(unsigned v) { 8529 if (v == 0xffffffff) 8530 return 0; 8531 // there can be 1's on either or both "outsides", all the "inside" 8532 // bits must be 0's 8533 unsigned int lsb = 0, msb = 31; 8534 while (v & (1 << msb)) --msb; 8535 while (v & (1 << lsb)) ++lsb; 8536 for (unsigned int i = lsb; i <= msb; ++i) { 8537 if (v & (1 << i)) 8538 return 0; 8539 } 8540 return 1; 8541} 8542 8543/// isFPImmLegal - Returns true if the target can instruction select the 8544/// specified FP immediate natively. If false, the legalizer will 8545/// materialize the FP immediate as a load from a constant pool. 8546bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 8547 if (!Subtarget->hasVFP3()) 8548 return false; 8549 if (VT == MVT::f32) 8550 return ARM_AM::getFP32Imm(Imm) != -1; 8551 if (VT == MVT::f64) 8552 return ARM_AM::getFP64Imm(Imm) != -1; 8553 return false; 8554} 8555 8556/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 8557/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 8558/// specified in the intrinsic calls. 8559bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 8560 const CallInst &I, 8561 unsigned Intrinsic) const { 8562 switch (Intrinsic) { 8563 case Intrinsic::arm_neon_vld1: 8564 case Intrinsic::arm_neon_vld2: 8565 case Intrinsic::arm_neon_vld3: 8566 case Intrinsic::arm_neon_vld4: 8567 case Intrinsic::arm_neon_vld2lane: 8568 case Intrinsic::arm_neon_vld3lane: 8569 case Intrinsic::arm_neon_vld4lane: { 8570 Info.opc = ISD::INTRINSIC_W_CHAIN; 8571 // Conservatively set memVT to the entire set of vectors loaded. 
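    // For example, a vld3 of <4 x i32> vectors returns three vectors
    // (48 bytes), so NumElts below is 6 and memVT becomes v6i64.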
8572 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 8573 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8574 Info.ptrVal = I.getArgOperand(0); 8575 Info.offset = 0; 8576 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8577 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8578 Info.vol = false; // volatile loads with NEON intrinsics not supported 8579 Info.readMem = true; 8580 Info.writeMem = false; 8581 return true; 8582 } 8583 case Intrinsic::arm_neon_vst1: 8584 case Intrinsic::arm_neon_vst2: 8585 case Intrinsic::arm_neon_vst3: 8586 case Intrinsic::arm_neon_vst4: 8587 case Intrinsic::arm_neon_vst2lane: 8588 case Intrinsic::arm_neon_vst3lane: 8589 case Intrinsic::arm_neon_vst4lane: { 8590 Info.opc = ISD::INTRINSIC_VOID; 8591 // Conservatively set memVT to the entire set of vectors stored. 8592 unsigned NumElts = 0; 8593 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 8594 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 8595 if (!ArgTy->isVectorTy()) 8596 break; 8597 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 8598 } 8599 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8600 Info.ptrVal = I.getArgOperand(0); 8601 Info.offset = 0; 8602 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8603 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8604 Info.vol = false; // volatile stores with NEON intrinsics not supported 8605 Info.readMem = false; 8606 Info.writeMem = true; 8607 return true; 8608 } 8609 case Intrinsic::arm_strexd: { 8610 Info.opc = ISD::INTRINSIC_W_CHAIN; 8611 Info.memVT = MVT::i64; 8612 Info.ptrVal = I.getArgOperand(2); 8613 Info.offset = 0; 8614 Info.align = 8; 8615 Info.vol = true; 8616 Info.readMem = false; 8617 Info.writeMem = true; 8618 return true; 8619 } 8620 case Intrinsic::arm_ldrexd: { 8621 Info.opc = ISD::INTRINSIC_W_CHAIN; 8622 Info.memVT = MVT::i64; 8623 Info.ptrVal = I.getArgOperand(0); 8624 Info.offset = 0; 8625 Info.align = 8; 8626 Info.vol = true; 8627 Info.readMem = true; 8628 Info.writeMem = false; 8629 return true; 8630 } 8631 default: 8632 break; 8633 } 8634 8635 return false; 8636} 8637