ARMISelLowering.cpp revision f31151f34ec37dd49bdf998e9e352d572f4a8e06
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace llvm {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call"
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const unsigned GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun");
"__aeabi_fcmpun"); 332 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 333 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 334 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 335 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 342 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 343 344 // Floating-point to integer conversions. 345 // RTABI chapter 4.1.2, Table 6 346 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 347 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 348 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 349 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 350 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 351 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 352 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 353 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 354 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 355 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 356 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 357 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 358 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 359 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 360 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 361 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 362 363 // Conversions between floating types. 364 // RTABI chapter 4.1.2, Table 7 365 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 366 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 367 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 368 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 369 370 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
"ARMISD::VSHRu"; 885 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 886 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 887 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 888 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 889 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 890 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 891 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 892 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 893 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 894 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 895 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 896 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 897 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 898 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 899 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 900 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 901 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 902 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 903 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 904 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 905 case ARMISD::VDUP: return "ARMISD::VDUP"; 906 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 907 case ARMISD::VEXT: return "ARMISD::VEXT"; 908 case ARMISD::VREV64: return "ARMISD::VREV64"; 909 case ARMISD::VREV32: return "ARMISD::VREV32"; 910 case ARMISD::VREV16: return "ARMISD::VREV16"; 911 case ARMISD::VZIP: return "ARMISD::VZIP"; 912 case ARMISD::VUZP: return "ARMISD::VUZP"; 913 case ARMISD::VTRN: return "ARMISD::VTRN"; 914 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 915 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 916 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 917 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 918 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 919 case ARMISD::FMAX: return "ARMISD::FMAX"; 920 case ARMISD::FMIN: return "ARMISD::FMIN"; 921 case ARMISD::BFI: return "ARMISD::BFI"; 922 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 923 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 924 case ARMISD::VBSL: return "ARMISD::VBSL"; 925 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 926 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 927 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 928 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 929 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 930 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 931 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 932 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 933 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 934 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 935 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 936 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 937 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 938 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 939 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 940 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 941 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 942 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 943 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 944 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 945 } 946} 947 948EVT ARMTargetLowering::getSetCCResultType(EVT VT) const { 949 if (!VT.isVector()) return getPointerTy(); 950 return VT.changeVectorElementTypeToInteger(); 951} 952 953/// getRegClassFor - Return the register class that should be used for the 954/// specified value type. 
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO: CondCode = ARMCC::VC; break;
  case ISD::SETUO: CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
1330 if (CCInfo.isFirstByValRegValid()) { 1331 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1332 unsigned int i, j; 1333 for (i = 0, j = CCInfo.getFirstByValReg(); j < ARM::R4; i++, j++) { 1334 SDValue Const = DAG.getConstant(4*i, MVT::i32); 1335 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1336 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1337 MachinePointerInfo(), 1338 false, false, 0); 1339 MemOpChains.push_back(Load.getValue(1)); 1340 RegsToPass.push_back(std::make_pair(j, Load)); 1341 } 1342 offset = ARM::R4 - CCInfo.getFirstByValReg(); 1343 CCInfo.clearFirstByValReg(); 1344 } 1345 1346 unsigned LocMemOffset = VA.getLocMemOffset(); 1347 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset); 1348 SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, 1349 StkPtrOff); 1350 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset); 1351 SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset); 1352 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, 1353 MVT::i32); 1354 // TODO: Disable AlwaysInline when it becomes possible 1355 // to emit a nested call sequence. 1356 MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, 1357 Flags.getByValAlign(), 1358 /*isVolatile=*/false, 1359 /*AlwaysInline=*/true, 1360 MachinePointerInfo(0), 1361 MachinePointerInfo(0))); 1362 1363 } else if (!IsSibCall) { 1364 assert(VA.isMemLoc()); 1365 1366 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1367 dl, DAG, VA, Flags)); 1368 } 1369 } 1370 1371 if (!MemOpChains.empty()) 1372 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1373 &MemOpChains[0], MemOpChains.size()); 1374 1375 // Build a sequence of copy-to-reg nodes chained together with token chain 1376 // and flag operands which copy the outgoing args into the appropriate regs. 1377 SDValue InFlag; 1378 // Tail call byval lowering might overwrite argument registers so in case of 1379 // tail call optimization the copies to registers are lowered later. 1380 if (!isTailCall) 1381 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1382 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1383 RegsToPass[i].second, InFlag); 1384 InFlag = Chain.getValue(1); 1385 } 1386 1387 // For tail calls lower the arguments to the 'real' stack slot. 1388 if (isTailCall) { 1389 // Force all the incoming stack arguments to be loaded from the stack 1390 // before any new outgoing arguments are stored to the stack, because the 1391 // outgoing stack slots may alias the incoming argument stack slots, and 1392 // the alias isn't otherwise explicit. This is slightly more conservative 1393 // than necessary, because it means that each store effectively depends 1394 // on every argument instead of just those arguments it would clobber. 1395 1396 // Do not flag preceding copytoreg stuff together with the following stuff. 1397 InFlag = SDValue(); 1398 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1399 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1400 RegsToPass[i].second, InFlag); 1401 InFlag = Chain.getValue(1); 1402 } 1403 InFlag =SDValue(); 1404 } 1405 1406 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1407 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1408 // node so that legalize doesn't hack it. 
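  // A rough sketch of the cases handled below (illustrative only):
  //  - the common case: the callee becomes a TargetGlobalAddress /
  //    TargetExternalSymbol operand of the call node (eventually a plain BL);
  //  - -arm-long-calls: the callee's address is loaded from a constant-pool
  //    entry and the call goes through that register;
  //  - Thumb1 without v5T calling an ARM function: the address is likewise
  //    materialized via the constant pool plus a PIC label so a register
  //    branch can be used.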
1409 bool isDirect = false; 1410 bool isARMFunc = false; 1411 bool isLocalARMFunc = false; 1412 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1413 1414 if (EnableARMLongCalls) { 1415 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1416 && "long-calls with non-static relocation model!"); 1417 // Handle a global address or an external symbol. If it's not one of 1418 // those, the target's already in a register, so we don't need to do 1419 // anything extra. 1420 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1421 const GlobalValue *GV = G->getGlobal(); 1422 // Create a constant pool entry for the callee address 1423 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1424 ARMConstantPoolValue *CPV = 1425 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1426 1427 // Get the address of the callee into a register 1428 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1429 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1430 Callee = DAG.getLoad(getPointerTy(), dl, 1431 DAG.getEntryNode(), CPAddr, 1432 MachinePointerInfo::getConstantPool(), 1433 false, false, 0); 1434 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1435 const char *Sym = S->getSymbol(); 1436 1437 // Create a constant pool entry for the callee address 1438 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1439 ARMConstantPoolValue *CPV = 1440 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1441 ARMPCLabelIndex, 0); 1442 // Get the address of the callee into a register 1443 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1444 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1445 Callee = DAG.getLoad(getPointerTy(), dl, 1446 DAG.getEntryNode(), CPAddr, 1447 MachinePointerInfo::getConstantPool(), 1448 false, false, 0); 1449 } 1450 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1451 const GlobalValue *GV = G->getGlobal(); 1452 isDirect = true; 1453 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1454 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1455 getTargetMachine().getRelocationModel() != Reloc::Static; 1456 isARMFunc = !Subtarget->isThumb() || isStub; 1457 // ARM call to a local ARM function is predicable. 1458 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1459 // tBX takes a register source operand. 
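    // Put informally: a Thumb1 caller without v5T has no immediate-form BLX
    // with which to reach an ARM-state callee, so the branch below loads the
    // callee address from the constant pool, adds the PIC label, and lets
    // the call go through a register instead.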
1460 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1461 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1462 ARMConstantPoolValue *CPV = 1463 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1464 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1465 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1466 Callee = DAG.getLoad(getPointerTy(), dl, 1467 DAG.getEntryNode(), CPAddr, 1468 MachinePointerInfo::getConstantPool(), 1469 false, false, 0); 1470 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1471 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1472 getPointerTy(), Callee, PICLabel); 1473 } else { 1474 // On ELF targets for PIC code, direct calls should go through the PLT 1475 unsigned OpFlags = 0; 1476 if (Subtarget->isTargetELF() && 1477 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1478 OpFlags = ARMII::MO_PLT; 1479 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1480 } 1481 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1482 isDirect = true; 1483 bool isStub = Subtarget->isTargetDarwin() && 1484 getTargetMachine().getRelocationModel() != Reloc::Static; 1485 isARMFunc = !Subtarget->isThumb() || isStub; 1486 // tBX takes a register source operand. 1487 const char *Sym = S->getSymbol(); 1488 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1489 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1490 ARMConstantPoolValue *CPV = 1491 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1492 ARMPCLabelIndex, 4); 1493 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1494 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1495 Callee = DAG.getLoad(getPointerTy(), dl, 1496 DAG.getEntryNode(), CPAddr, 1497 MachinePointerInfo::getConstantPool(), 1498 false, false, 0); 1499 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1500 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1501 getPointerTy(), Callee, PICLabel); 1502 } else { 1503 unsigned OpFlags = 0; 1504 // On ELF targets for PIC code, direct calls should go through the PLT 1505 if (Subtarget->isTargetELF() && 1506 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1507 OpFlags = ARMII::MO_PLT; 1508 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1509 } 1510 } 1511 1512 // FIXME: handle tail calls differently. 1513 unsigned CallOpc; 1514 if (Subtarget->isThumb()) { 1515 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1516 CallOpc = ARMISD::CALL_NOLINK; 1517 else 1518 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1519 } else { 1520 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1521 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1522 : ARMISD::CALL_NOLINK; 1523 } 1524 1525 std::vector<SDValue> Ops; 1526 Ops.push_back(Chain); 1527 Ops.push_back(Callee); 1528 1529 // Add argument registers to the end of the list so that they are known live 1530 // into the call. 1531 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1532 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1533 RegsToPass[i].second.getValueType())); 1534 1535 if (InFlag.getNode()) 1536 Ops.push_back(InFlag); 1537 1538 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1539 if (isTailCall) 1540 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1541 1542 // Returns a chain and a flag for retval copy to use. 
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");
  if ((!State->isFirstByValRegValid()) &&
      (ARM::R0 <= reg) && (reg <= ARM::R3)) {
    State->setFirstByValReg(reg);
    // At a call site, a byval parameter that is split between
    // registers and memory needs its size truncated here.  In a
    // function prologue, such byval parameters are reassembled in
    // memory, and are not truncated.
    if (State->getCallOrPrologue() == Call) {
      unsigned excess = 4 * (ARM::R4 - reg);
      assert(size >= excess && "expected larger existing stack allocation");
      size -= excess;
    }
  }
  // Confiscate any remaining parameter registers to preclude their
  // assignment to subsequent parameters.
  while (State->AllocateReg(GPRArgRegs, 4))
    ;
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const ARMInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
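/// (As a rough illustration of the sibcall shape accepted here: a
/// non-variadic, non-sret call in tail position whose arguments already sit
/// in the registers and stack slots the caller itself received can be
/// emitted as a TC_RETURN branch rather than a BL followed by a return.)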
1633bool 1634ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1635 CallingConv::ID CalleeCC, 1636 bool isVarArg, 1637 bool isCalleeStructRet, 1638 bool isCallerStructRet, 1639 const SmallVectorImpl<ISD::OutputArg> &Outs, 1640 const SmallVectorImpl<SDValue> &OutVals, 1641 const SmallVectorImpl<ISD::InputArg> &Ins, 1642 SelectionDAG& DAG) const { 1643 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1644 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1645 bool CCMatch = CallerCC == CalleeCC; 1646 1647 // Look for obvious safe cases to perform tail call optimization that do not 1648 // require ABI changes. This is what gcc calls sibcall. 1649 1650 // Do not sibcall optimize vararg calls unless the call site is not passing 1651 // any arguments. 1652 if (isVarArg && !Outs.empty()) 1653 return false; 1654 1655 // Also avoid sibcall optimization if either caller or callee uses struct 1656 // return semantics. 1657 if (isCalleeStructRet || isCallerStructRet) 1658 return false; 1659 1660 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1661 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1662 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1663 // support in the assembler and linker to be used. This would need to be 1664 // fixed to fully support tail calls in Thumb1. 1665 // 1666 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1667 // LR. This means if we need to reload LR, it takes an extra instructions, 1668 // which outweighs the value of the tail call; but here we don't know yet 1669 // whether LR is going to be used. Probably the right approach is to 1670 // generate the tail call here and turn it back into CALL/RET in 1671 // emitEpilogue if LR is used. 1672 1673 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1674 // but we need to make sure there are enough registers; the only valid 1675 // registers are the 4 used for parameters. We don't currently do this 1676 // case. 1677 if (Subtarget->isThumb1Only()) 1678 return false; 1679 1680 // If the calling conventions do not match, then we'd better make sure the 1681 // results are returned in the same way as what the caller expects. 1682 if (!CCMatch) { 1683 SmallVector<CCValAssign, 16> RVLocs1; 1684 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1685 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1686 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1687 1688 SmallVector<CCValAssign, 16> RVLocs2; 1689 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1690 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1691 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1692 1693 if (RVLocs1.size() != RVLocs2.size()) 1694 return false; 1695 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1696 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1697 return false; 1698 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1699 return false; 1700 if (RVLocs1[i].isRegLoc()) { 1701 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1702 return false; 1703 } else { 1704 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1705 return false; 1706 } 1707 } 1708 } 1709 1710 // If the callee takes no arguments then go on to check the results of the 1711 // call. 1712 if (!Outs.empty()) { 1713 // Check if stack adjustment is needed. 
For now, do not do this if any 1714 // argument is passed on the stack. 1715 SmallVector<CCValAssign, 16> ArgLocs; 1716 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1717 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1718 CCInfo.AnalyzeCallOperands(Outs, 1719 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1720 if (CCInfo.getNextStackOffset()) { 1721 MachineFunction &MF = DAG.getMachineFunction(); 1722 1723 // Check if the arguments are already laid out in the right way as 1724 // the caller's fixed stack objects. 1725 MachineFrameInfo *MFI = MF.getFrameInfo(); 1726 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1727 const ARMInstrInfo *TII = 1728 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1729 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1730 i != e; 1731 ++i, ++realArgIdx) { 1732 CCValAssign &VA = ArgLocs[i]; 1733 EVT RegVT = VA.getLocVT(); 1734 SDValue Arg = OutVals[realArgIdx]; 1735 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1736 if (VA.getLocInfo() == CCValAssign::Indirect) 1737 return false; 1738 if (VA.needsCustom()) { 1739 // f64 and vector types are split into multiple registers or 1740 // register/stack-slot combinations. The types will not match 1741 // the registers; give up on memory f64 refs until we figure 1742 // out what to do about this. 1743 if (!VA.isRegLoc()) 1744 return false; 1745 if (!ArgLocs[++i].isRegLoc()) 1746 return false; 1747 if (RegVT == MVT::v2f64) { 1748 if (!ArgLocs[++i].isRegLoc()) 1749 return false; 1750 if (!ArgLocs[++i].isRegLoc()) 1751 return false; 1752 } 1753 } else if (!VA.isRegLoc()) { 1754 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1755 MFI, MRI, TII)) 1756 return false; 1757 } 1758 } 1759 } 1760 } 1761 1762 return true; 1763} 1764 1765SDValue 1766ARMTargetLowering::LowerReturn(SDValue Chain, 1767 CallingConv::ID CallConv, bool isVarArg, 1768 const SmallVectorImpl<ISD::OutputArg> &Outs, 1769 const SmallVectorImpl<SDValue> &OutVals, 1770 DebugLoc dl, SelectionDAG &DAG) const { 1771 1772 // CCValAssign - represent the assignment of the return value to a location. 1773 SmallVector<CCValAssign, 16> RVLocs; 1774 1775 // CCState - Info about the registers and stack slots. 1776 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1777 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 1778 1779 // Analyze outgoing return values. 1780 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1781 isVarArg)); 1782 1783 // If this is the first return lowered for this function, add 1784 // the regs to the liveout set for the function. 1785 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1786 for (unsigned i = 0; i != RVLocs.size(); ++i) 1787 if (RVLocs[i].isRegLoc()) 1788 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1789 } 1790 1791 SDValue Flag; 1792 1793 // Copy the result values into the output registers. 1794 for (unsigned i = 0, realRVLocIdx = 0; 1795 i != RVLocs.size(); 1796 ++i, ++realRVLocIdx) { 1797 CCValAssign &VA = RVLocs[i]; 1798 assert(VA.isRegLoc() && "Can only return in registers!"); 1799 1800 SDValue Arg = OutVals[realRVLocIdx]; 1801 1802 switch (VA.getLocInfo()) { 1803 default: llvm_unreachable("Unknown loc info!"); 1804 case CCValAssign::Full: break; 1805 case CCValAssign::BCvt: 1806 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1807 break; 1808 } 1809 1810 if (VA.needsCustom()) { 1811 if (VA.getLocVT() == MVT::v2f64) { 1812 // Extract the first half and return it in two registers. 
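        // (Illustrative: with the integer return convention this means a
        // v2f64 value comes back in four GPRs: lane 0 through the register
        // pair handled here, lane 1 via the f64 fall-through path below.)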
1813 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1814 DAG.getConstant(0, MVT::i32)); 1815 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1816 DAG.getVTList(MVT::i32, MVT::i32), Half); 1817 1818 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1819 Flag = Chain.getValue(1); 1820 VA = RVLocs[++i]; // skip ahead to next loc 1821 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1822 HalfGPRs.getValue(1), Flag); 1823 Flag = Chain.getValue(1); 1824 VA = RVLocs[++i]; // skip ahead to next loc 1825 1826 // Extract the 2nd half and fall through to handle it as an f64 value. 1827 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1828 DAG.getConstant(1, MVT::i32)); 1829 } 1830 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1831 // available. 1832 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1833 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1834 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1835 Flag = Chain.getValue(1); 1836 VA = RVLocs[++i]; // skip ahead to next loc 1837 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1838 Flag); 1839 } else 1840 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1841 1842 // Guarantee that all emitted copies are 1843 // stuck together, avoiding something bad. 1844 Flag = Chain.getValue(1); 1845 } 1846 1847 SDValue result; 1848 if (Flag.getNode()) 1849 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1850 else // Return Void 1851 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1852 1853 return result; 1854} 1855 1856bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1857 if (N->getNumValues() != 1) 1858 return false; 1859 if (!N->hasNUsesOfValue(1, 0)) 1860 return false; 1861 1862 unsigned NumCopies = 0; 1863 SDNode* Copies[2]; 1864 SDNode *Use = *N->use_begin(); 1865 if (Use->getOpcode() == ISD::CopyToReg) { 1866 Copies[NumCopies++] = Use; 1867 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1868 // f64 returned in a pair of GPRs. 1869 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1870 UI != UE; ++UI) { 1871 if (UI->getOpcode() != ISD::CopyToReg) 1872 return false; 1873 Copies[UI.getUse().getResNo()] = *UI; 1874 ++NumCopies; 1875 } 1876 } else if (Use->getOpcode() == ISD::BITCAST) { 1877 // f32 returned in a single GPR. 
1878 if (!Use->hasNUsesOfValue(1, 0)) 1879 return false; 1880 Use = *Use->use_begin(); 1881 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1882 return false; 1883 Copies[NumCopies++] = Use; 1884 } else { 1885 return false; 1886 } 1887 1888 if (NumCopies != 1 && NumCopies != 2) 1889 return false; 1890 1891 bool HasRet = false; 1892 for (unsigned i = 0; i < NumCopies; ++i) { 1893 SDNode *Copy = Copies[i]; 1894 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1895 UI != UE; ++UI) { 1896 if (UI->getOpcode() == ISD::CopyToReg) { 1897 SDNode *Use = *UI; 1898 if (Use == Copies[0] || Use == Copies[1]) 1899 continue; 1900 return false; 1901 } 1902 if (UI->getOpcode() != ARMISD::RET_FLAG) 1903 return false; 1904 HasRet = true; 1905 } 1906 } 1907 1908 return HasRet; 1909} 1910 1911bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1912 if (!EnableARMTailCalls) 1913 return false; 1914 1915 if (!CI->isTailCall()) 1916 return false; 1917 1918 return !Subtarget->isThumb1Only(); 1919} 1920 1921// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1922// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1923// one of the above mentioned nodes. It has to be wrapped because otherwise 1924// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1925// be used to form addressing mode. These wrapped nodes will be selected 1926// into MOVi. 1927static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1928 EVT PtrVT = Op.getValueType(); 1929 // FIXME there is no actual debug info here 1930 DebugLoc dl = Op.getDebugLoc(); 1931 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1932 SDValue Res; 1933 if (CP->isMachineConstantPoolEntry()) 1934 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1935 CP->getAlignment()); 1936 else 1937 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1938 CP->getAlignment()); 1939 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1940} 1941 1942unsigned ARMTargetLowering::getJumpTableEncoding() const { 1943 return MachineJumpTableInfo::EK_Inline; 1944} 1945 1946SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1947 SelectionDAG &DAG) const { 1948 MachineFunction &MF = DAG.getMachineFunction(); 1949 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1950 unsigned ARMPCLabelIndex = 0; 1951 DebugLoc DL = Op.getDebugLoc(); 1952 EVT PtrVT = getPointerTy(); 1953 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1954 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1955 SDValue CPAddr; 1956 if (RelocM == Reloc::Static) { 1957 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1958 } else { 1959 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1960 ARMPCLabelIndex = AFI->createPICLabelUId(); 1961 ARMConstantPoolValue *CPV = 1962 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 1963 ARMCP::CPBlockAddress, PCAdj); 1964 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1965 } 1966 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1967 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1968 MachinePointerInfo::getConstantPool(), 1969 false, false, 0); 1970 if (RelocM == Reloc::Static) 1971 return Result; 1972 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1973 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1974} 1975 1976// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1977SDValue 1978ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1979 SelectionDAG &DAG) const { 1980 DebugLoc dl = GA->getDebugLoc(); 1981 EVT PtrVT = getPointerTy(); 1982 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1983 MachineFunction &MF = DAG.getMachineFunction(); 1984 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1985 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1986 ARMConstantPoolValue *CPV = 1987 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 1988 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1989 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1990 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1991 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1992 MachinePointerInfo::getConstantPool(), 1993 false, false, 0); 1994 SDValue Chain = Argument.getValue(1); 1995 1996 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1997 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1998 1999 // call __tls_get_addr. 2000 ArgListTy Args; 2001 ArgListEntry Entry; 2002 Entry.Node = Argument; 2003 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2004 Args.push_back(Entry); 2005 // FIXME: is there useful debug info available here? 2006 std::pair<SDValue, SDValue> CallResult = 2007 LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()), 2008 false, false, false, false, 2009 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 2010 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2011 return CallResult.first; 2012} 2013 2014// Lower ISD::GlobalTLSAddress using the "initial exec" or 2015// "local exec" model. 2016SDValue 2017ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2018 SelectionDAG &DAG) const { 2019 const GlobalValue *GV = GA->getGlobal(); 2020 DebugLoc dl = GA->getDebugLoc(); 2021 SDValue Offset; 2022 SDValue Chain = DAG.getEntryNode(); 2023 EVT PtrVT = getPointerTy(); 2024 // Get the Thread Pointer 2025 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2026 2027 if (GV->isDeclaration()) { 2028 MachineFunction &MF = DAG.getMachineFunction(); 2029 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2030 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2031 // Initial exec model. 2032 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2033 ARMConstantPoolValue *CPV = 2034 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2035 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2036 true); 2037 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2038 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2039 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2040 MachinePointerInfo::getConstantPool(), 2041 false, false, 0); 2042 Chain = Offset.getValue(1); 2043 2044 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2045 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2046 2047 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2048 MachinePointerInfo::getConstantPool(), 2049 false, false, 0); 2050 } else { 2051 // local exec model 2052 ARMConstantPoolValue *CPV = 2053 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2054 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2055 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2056 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2057 MachinePointerInfo::getConstantPool(), 2058 false, false, 0); 2059 } 2060 2061 // The address of the thread local variable is the add of the thread 2062 // pointer with the offset of the variable. 2063 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2064} 2065 2066SDValue 2067ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2068 // TODO: implement the "local dynamic" model 2069 assert(Subtarget->isTargetELF() && 2070 "TLS not implemented for non-ELF targets"); 2071 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2072 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 2073 // otherwise use the "Local Exec" TLS Model 2074 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 2075 return LowerToTLSGeneralDynamicModel(GA, DAG); 2076 else 2077 return LowerToTLSExecModels(GA, DAG); 2078} 2079 2080SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2081 SelectionDAG &DAG) const { 2082 EVT PtrVT = getPointerTy(); 2083 DebugLoc dl = Op.getDebugLoc(); 2084 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2085 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2086 if (RelocM == Reloc::PIC_) { 2087 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2088 ARMConstantPoolValue *CPV = 2089 ARMConstantPoolConstant::Create(GV, 2090 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2091 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2092 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2093 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2094 CPAddr, 2095 MachinePointerInfo::getConstantPool(), 2096 false, false, 0); 2097 SDValue Chain = Result.getValue(1); 2098 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2099 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2100 if (!UseGOTOFF) 2101 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2102 MachinePointerInfo::getGOT(), false, false, 0); 2103 return Result; 2104 } 2105 2106 // If we have T2 ops, we can materialize the address directly via movt/movw 2107 // pair. This is always cheaper in terms of performance, but uses at least 2 2108 // extra bytes. 2109 MachineFunction &MF = DAG.getMachineFunction(); 2110 if (Subtarget->useMovt() && 2111 !MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) { 2112 ++NumMovwMovt; 2113 // FIXME: Once remat is capable of dealing with instructions with register 2114 // operands, expand this into two nodes. 
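    // Roughly, the intent is the usual two-instruction materialization:
    //   movw rN, :lower16:sym
    //   movt rN, :upper16:sym
    // which is why this path is gated on useMovt() and counted by
    // ++NumMovwMovt above.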
2115 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2116 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2117 } else { 2118 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2119 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2120 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2121 MachinePointerInfo::getConstantPool(), 2122 false, false, 0); 2123 } 2124} 2125 2126SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2127 SelectionDAG &DAG) const { 2128 EVT PtrVT = getPointerTy(); 2129 DebugLoc dl = Op.getDebugLoc(); 2130 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2131 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2132 MachineFunction &MF = DAG.getMachineFunction(); 2133 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2134 2135 // FIXME: Enable this for static codegen when tool issues are fixed. 2136 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2137 ++NumMovwMovt; 2138 // FIXME: Once remat is capable of dealing with instructions with register 2139 // operands, expand this into two nodes. 2140 if (RelocM == Reloc::Static) 2141 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2142 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2143 2144 unsigned Wrapper = (RelocM == Reloc::PIC_) 2145 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2146 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2147 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2148 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2149 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2150 MachinePointerInfo::getGOT(), false, false, 0); 2151 return Result; 2152 } 2153 2154 unsigned ARMPCLabelIndex = 0; 2155 SDValue CPAddr; 2156 if (RelocM == Reloc::Static) { 2157 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2158 } else { 2159 ARMPCLabelIndex = AFI->createPICLabelUId(); 2160 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2161 ARMConstantPoolValue *CPV = 2162 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2163 PCAdj); 2164 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2165 } 2166 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2167 2168 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2169 MachinePointerInfo::getConstantPool(), 2170 false, false, 0); 2171 SDValue Chain = Result.getValue(1); 2172 2173 if (RelocM == Reloc::PIC_) { 2174 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2175 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2176 } 2177 2178 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2179 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2180 false, false, 0); 2181 2182 return Result; 2183} 2184 2185SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2186 SelectionDAG &DAG) const { 2187 assert(Subtarget->isTargetELF() && 2188 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2189 MachineFunction &MF = DAG.getMachineFunction(); 2190 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2191 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2192 EVT PtrVT = getPointerTy(); 2193 DebugLoc dl = Op.getDebugLoc(); 2194 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2195 ARMConstantPoolValue *CPV = 2196 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2197 ARMPCLabelIndex, PCAdj); 2198 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2199 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2200 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2201 MachinePointerInfo::getConstantPool(), 2202 false, false, 0); 2203 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2204 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2205} 2206 2207SDValue 2208ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2209 const { 2210 DebugLoc dl = Op.getDebugLoc(); 2211 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2212 Op.getOperand(0), Op.getOperand(1)); 2213} 2214 2215SDValue 2216ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2217 DebugLoc dl = Op.getDebugLoc(); 2218 SDValue Val = DAG.getConstant(0, MVT::i32); 2219 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2220 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2221 Op.getOperand(1), Val); 2222} 2223 2224SDValue 2225ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2226 DebugLoc dl = Op.getDebugLoc(); 2227 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2228 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2229} 2230 2231SDValue 2232ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2233 const ARMSubtarget *Subtarget) const { 2234 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2235 DebugLoc dl = Op.getDebugLoc(); 2236 switch (IntNo) { 2237 default: return SDValue(); // Don't custom lower most intrinsics. 2238 case Intrinsic::arm_thread_pointer: { 2239 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2240 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2241 } 2242 case Intrinsic::eh_sjlj_lsda: { 2243 MachineFunction &MF = DAG.getMachineFunction(); 2244 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2245 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2246 EVT PtrVT = getPointerTy(); 2247 DebugLoc dl = Op.getDebugLoc(); 2248 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2249 SDValue CPAddr; 2250 unsigned PCAdj = (RelocM != Reloc::PIC_) 2251 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2252 ARMConstantPoolValue *CPV = 2253 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2254 ARMCP::CPLSDA, PCAdj); 2255 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2256 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2257 SDValue Result = 2258 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2259 MachinePointerInfo::getConstantPool(), 2260 false, false, 0); 2261 2262 if (RelocM == Reloc::PIC_) { 2263 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2264 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2265 } 2266 return Result; 2267 } 2268 case Intrinsic::arm_neon_vmulls: 2269 case Intrinsic::arm_neon_vmullu: { 2270 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2271 ? 
ARMISD::VMULLs : ARMISD::VMULLu; 2272 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2273 Op.getOperand(1), Op.getOperand(2)); 2274 } 2275 } 2276} 2277 2278static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2279 const ARMSubtarget *Subtarget) { 2280 DebugLoc dl = Op.getDebugLoc(); 2281 if (!Subtarget->hasDataBarrier()) { 2282 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2283 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2284 // here. 2285 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2286 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2287 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2288 DAG.getConstant(0, MVT::i32)); 2289 } 2290 2291 SDValue Op5 = Op.getOperand(5); 2292 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2293 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2294 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2295 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2296 2297 ARM_MB::MemBOpt DMBOpt; 2298 if (isDeviceBarrier) 2299 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2300 else 2301 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2302 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2303 DAG.getConstant(DMBOpt, MVT::i32)); 2304} 2305 2306 2307static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2308 const ARMSubtarget *Subtarget) { 2309 // FIXME: handle "fence singlethread" more efficiently. 2310 DebugLoc dl = Op.getDebugLoc(); 2311 if (!Subtarget->hasDataBarrier()) { 2312 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2313 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2314 // here. 2315 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2316 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2317 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2318 DAG.getConstant(0, MVT::i32)); 2319 } 2320 2321 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2322 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2323} 2324 2325static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2326 const ARMSubtarget *Subtarget) { 2327 // ARM pre v5TE and Thumb1 does not have preload instructions. 2328 if (!(Subtarget->isThumb2() || 2329 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2330 // Just preserve the chain. 2331 return Op.getOperand(0); 2332 2333 DebugLoc dl = Op.getDebugLoc(); 2334 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2335 if (!isRead && 2336 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2337 // ARMv7 with MP extension has PLDW. 2338 return Op.getOperand(0); 2339 2340 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2341 if (Subtarget->isThumb()) { 2342 // Invert the bits. 2343 isRead = ~isRead & 1; 2344 isData = ~isData & 1; 2345 } 2346 2347 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2348 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2349 DAG.getConstant(isData, MVT::i32)); 2350} 2351 2352static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2353 MachineFunction &MF = DAG.getMachineFunction(); 2354 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2355 2356 // vastart just stores the address of the VarArgsFrameIndex slot into the 2357 // memory location argument. 
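  // (Sketch: Op.getOperand(1) is the address of the user's va_list object;
  // the store below writes the VarArgsFrameIndex (the register-save area
  // set up by VarArgStyleRegisters) into it, and va_arg then simply walks
  // memory from that point.)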
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
                                  unsigned &VARegSize, unsigned &VARegSaveSize)
  const {
  unsigned NumGPRs;
  if (CCInfo.isFirstByValRegValid())
    NumGPRs = ARM::R4 - CCInfo.getFirstByValReg();
  else {
    unsigned int firstUnalloced;
    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
                                                sizeof(GPRArgRegs) /
                                                sizeof(GPRArgRegs[0]));
    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
  }

  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
  VARegSize = NumGPRs * 4;
  VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
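// A hedged example: in 'void f(int a, ...)', with the fixed argument in R0,
// the loop below spills R1-R3 into a fixed stack object contiguous with the
// caller-provided argument area, so a single va_list pointer can advance
// linearly from the spilled registers into the caller's stack arguments.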
2428void 2429ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 2430 DebugLoc dl, SDValue &Chain, 2431 unsigned ArgOffset) const { 2432 MachineFunction &MF = DAG.getMachineFunction(); 2433 MachineFrameInfo *MFI = MF.getFrameInfo(); 2434 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2435 unsigned firstRegToSaveIndex; 2436 if (CCInfo.isFirstByValRegValid()) 2437 firstRegToSaveIndex = CCInfo.getFirstByValReg() - ARM::R0; 2438 else { 2439 firstRegToSaveIndex = CCInfo.getFirstUnallocated 2440 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2441 } 2442 2443 unsigned VARegSize, VARegSaveSize; 2444 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2445 if (VARegSaveSize) { 2446 // If this function is vararg, store any remaining integer argument regs 2447 // to their spots on the stack so that they may be loaded by deferencing 2448 // the result of va_next. 2449 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2450 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(VARegSaveSize, 2451 ArgOffset + VARegSaveSize 2452 - VARegSize, 2453 false)); 2454 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2455 getPointerTy()); 2456 2457 SmallVector<SDValue, 4> MemOps; 2458 for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { 2459 TargetRegisterClass *RC; 2460 if (AFI->isThumb1OnlyFunction()) 2461 RC = ARM::tGPRRegisterClass; 2462 else 2463 RC = ARM::GPRRegisterClass; 2464 2465 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC); 2466 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2467 SDValue Store = 2468 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2469 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2470 false, false, 0); 2471 MemOps.push_back(Store); 2472 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2473 DAG.getConstant(4, getPointerTy())); 2474 } 2475 if (!MemOps.empty()) 2476 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2477 &MemOps[0], MemOps.size()); 2478 } else 2479 // This will point to the next argument passed via stack. 2480 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2481} 2482 2483SDValue 2484ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2485 CallingConv::ID CallConv, bool isVarArg, 2486 const SmallVectorImpl<ISD::InputArg> 2487 &Ins, 2488 DebugLoc dl, SelectionDAG &DAG, 2489 SmallVectorImpl<SDValue> &InVals) 2490 const { 2491 MachineFunction &MF = DAG.getMachineFunction(); 2492 MachineFrameInfo *MFI = MF.getFrameInfo(); 2493 2494 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2495 2496 // Assign locations to all of the incoming arguments. 2497 SmallVector<CCValAssign, 16> ArgLocs; 2498 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2499 getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue); 2500 CCInfo.AnalyzeFormalArguments(Ins, 2501 CCAssignFnForNode(CallConv, /* Return*/ false, 2502 isVarArg)); 2503 2504 SmallVector<SDValue, 16> ArgValues; 2505 int lastInsIndex = -1; 2506 2507 SDValue ArgValue; 2508 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2509 CCValAssign &VA = ArgLocs[i]; 2510 2511 // Arguments stored in registers. 2512 if (VA.isRegLoc()) { 2513 EVT RegVT = VA.getLocVT(); 2514 2515 if (VA.needsCustom()) { 2516 // f64 and vector types are split up into multiple registers or 2517 // combinations of registers and stack slots. 
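        // For instance (a sketch, not exhaustive): an f64 formal may arrive
        // as a GPR pair that GetF64FormalArgument reassembles with VMOVDRR,
        // or as one GPR plus a 4-byte stack slot when the registers run out
        // mid-value; v2f64 goes through that path twice and rebuilds the
        // lanes with INSERT_VECTOR_ELT below.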
2518 if (VA.getLocVT() == MVT::v2f64) { 2519 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2520 Chain, DAG, dl); 2521 VA = ArgLocs[++i]; // skip ahead to next loc 2522 SDValue ArgValue2; 2523 if (VA.isMemLoc()) { 2524 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2525 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2526 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2527 MachinePointerInfo::getFixedStack(FI), 2528 false, false, 0); 2529 } else { 2530 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2531 Chain, DAG, dl); 2532 } 2533 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2534 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2535 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2536 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2537 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2538 } else 2539 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2540 2541 } else { 2542 TargetRegisterClass *RC; 2543 2544 if (RegVT == MVT::f32) 2545 RC = ARM::SPRRegisterClass; 2546 else if (RegVT == MVT::f64) 2547 RC = ARM::DPRRegisterClass; 2548 else if (RegVT == MVT::v2f64) 2549 RC = ARM::QPRRegisterClass; 2550 else if (RegVT == MVT::i32) 2551 RC = (AFI->isThumb1OnlyFunction() ? 2552 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2553 else 2554 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2555 2556 // Transform the arguments in physical registers into virtual ones. 2557 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2558 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2559 } 2560 2561 // If this is an 8 or 16-bit value, it is really passed promoted 2562 // to 32 bits. Insert an assert[sz]ext to capture this, then 2563 // truncate to the right size. 2564 switch (VA.getLocInfo()) { 2565 default: llvm_unreachable("Unknown loc info!"); 2566 case CCValAssign::Full: break; 2567 case CCValAssign::BCvt: 2568 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2569 break; 2570 case CCValAssign::SExt: 2571 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2572 DAG.getValueType(VA.getValVT())); 2573 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2574 break; 2575 case CCValAssign::ZExt: 2576 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2577 DAG.getValueType(VA.getValVT())); 2578 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2579 break; 2580 } 2581 2582 InVals.push_back(ArgValue); 2583 2584 } else { // VA.isRegLoc() 2585 2586 // sanity check 2587 assert(VA.isMemLoc()); 2588 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2589 2590 int index = ArgLocs[i].getValNo(); 2591 2592 // Some Ins[] entries become multiple ArgLoc[] entries. 2593 // Process them only once. 2594 if (index != lastInsIndex) 2595 { 2596 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2597 // FIXME: For now, all byval parameter objects are marked mutable. 2598 // This can be changed with more analysis. 2599 // In case of tail call optimization mark all arguments mutable. 2600 // Since they could be overwritten by lowering of arguments in case of 2601 // a tail call. 2602 if (Flags.isByVal()) { 2603 unsigned VARegSize, VARegSaveSize; 2604 computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); 2605 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); 2606 unsigned Bytes = Flags.getByValSize() - VARegSize; 2607 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2608 int FI = MFI->CreateFixedObject(Bytes, 2609 VA.getLocMemOffset(), false); 2610 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2611 } else { 2612 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2613 VA.getLocMemOffset(), true); 2614 2615 // Create load nodes to retrieve arguments from the stack. 2616 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2617 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2618 MachinePointerInfo::getFixedStack(FI), 2619 false, false, 0)); 2620 } 2621 lastInsIndex = index; 2622 } 2623 } 2624 } 2625 2626 // varargs 2627 if (isVarArg) 2628 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); 2629 2630 return Chain; 2631} 2632 2633/// isFloatingPointZero - Return true if this is +0.0. 2634static bool isFloatingPointZero(SDValue Op) { 2635 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2636 return CFP->getValueAPF().isPosZero(); 2637 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2638 // Maybe this has already been legalized into the constant pool? 2639 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2640 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2641 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2642 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2643 return CFP->getValueAPF().isPosZero(); 2644 } 2645 } 2646 return false; 2647} 2648 2649/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2650/// the given operands. 2651SDValue 2652ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2653 SDValue &ARMcc, SelectionDAG &DAG, 2654 DebugLoc dl) const { 2655 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2656 unsigned C = RHSC->getZExtValue(); 2657 if (!isLegalICmpImmediate(C)) { 2658 // Constant does not fit, try adjusting it by one? 2659 switch (CC) { 2660 default: break; 2661 case ISD::SETLT: 2662 case ISD::SETGE: 2663 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2664 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2665 RHS = DAG.getConstant(C-1, MVT::i32); 2666 } 2667 break; 2668 case ISD::SETULT: 2669 case ISD::SETUGE: 2670 if (C != 0 && isLegalICmpImmediate(C-1)) { 2671 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2672 RHS = DAG.getConstant(C-1, MVT::i32); 2673 } 2674 break; 2675 case ISD::SETLE: 2676 case ISD::SETGT: 2677 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2678 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2679 RHS = DAG.getConstant(C+1, MVT::i32); 2680 } 2681 break; 2682 case ISD::SETULE: 2683 case ISD::SETUGT: 2684 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2685 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2686 RHS = DAG.getConstant(C+1, MVT::i32); 2687 } 2688 break; 2689 } 2690 } 2691 } 2692 2693 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2694 ARMISD::NodeType CompareType; 2695 switch (CondCode) { 2696 default: 2697 CompareType = ARMISD::CMP; 2698 break; 2699 case ARMCC::EQ: 2700 case ARMCC::NE: 2701 // Uses only Z Flag 2702 CompareType = ARMISD::CMPZ; 2703 break; 2704 } 2705 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2706 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2707} 2708 2709/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
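/// Very roughly, the nodes built here correspond to
///   vcmp.f64 d0, d1        (or the compare-with-zero form)
///   vmrs     APSR_nzcv, fpscr
/// so the NZCV flags become available to the CMOV or branch that follows.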
2710SDValue 2711ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2712 DebugLoc dl) const { 2713 SDValue Cmp; 2714 if (!isFloatingPointZero(RHS)) 2715 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2716 else 2717 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2718 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2719} 2720 2721/// duplicateCmp - Glue values can have only one use, so this function 2722/// duplicates a comparison node. 2723SDValue 2724ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2725 unsigned Opc = Cmp.getOpcode(); 2726 DebugLoc DL = Cmp.getDebugLoc(); 2727 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2728 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2729 2730 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2731 Cmp = Cmp.getOperand(0); 2732 Opc = Cmp.getOpcode(); 2733 if (Opc == ARMISD::CMPFP) 2734 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 2735 else { 2736 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2737 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2738 } 2739 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2740} 2741 2742SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2743 SDValue Cond = Op.getOperand(0); 2744 SDValue SelectTrue = Op.getOperand(1); 2745 SDValue SelectFalse = Op.getOperand(2); 2746 DebugLoc dl = Op.getDebugLoc(); 2747 2748 // Convert: 2749 // 2750 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2751 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2752 // 2753 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2754 const ConstantSDNode *CMOVTrue = 2755 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2756 const ConstantSDNode *CMOVFalse = 2757 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2758 2759 if (CMOVTrue && CMOVFalse) { 2760 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2761 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2762 2763 SDValue True; 2764 SDValue False; 2765 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2766 True = SelectTrue; 2767 False = SelectFalse; 2768 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2769 True = SelectFalse; 2770 False = SelectTrue; 2771 } 2772 2773 if (True.getNode() && False.getNode()) { 2774 EVT VT = Op.getValueType(); 2775 SDValue ARMcc = Cond.getOperand(2); 2776 SDValue CCR = Cond.getOperand(3); 2777 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2778 assert(True.getValueType() == VT); 2779 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2780 } 2781 } 2782 } 2783 2784 return DAG.getSelectCC(dl, Cond, 2785 DAG.getConstant(0, Cond.getValueType()), 2786 SelectTrue, SelectFalse, ISD::SETNE); 2787} 2788 2789SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2790 EVT VT = Op.getValueType(); 2791 SDValue LHS = Op.getOperand(0); 2792 SDValue RHS = Op.getOperand(1); 2793 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2794 SDValue TrueVal = Op.getOperand(2); 2795 SDValue FalseVal = Op.getOperand(3); 2796 DebugLoc dl = Op.getDebugLoc(); 2797 2798 if (LHS.getValueType() == MVT::i32) { 2799 SDValue ARMcc; 2800 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2801 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2802 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2803 } 2804 2805 ARMCC::CondCodes CondCode, CondCode2; 2806 FPCCToARMCC(CC, 
CondCode, CondCode2); 2807 2808 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2809 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2810 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2811 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2812 ARMcc, CCR, Cmp); 2813 if (CondCode2 != ARMCC::AL) { 2814 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2815 // FIXME: Needs another CMP because flag can have but one use. 2816 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2817 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2818 Result, TrueVal, ARMcc2, CCR, Cmp2); 2819 } 2820 return Result; 2821} 2822 2823/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2824/// to morph to an integer compare sequence. 2825static bool canChangeToInt(SDValue Op, bool &SeenZero, 2826 const ARMSubtarget *Subtarget) { 2827 SDNode *N = Op.getNode(); 2828 if (!N->hasOneUse()) 2829 // Otherwise it requires moving the value from fp to integer registers. 2830 return false; 2831 if (!N->getNumValues()) 2832 return false; 2833 EVT VT = Op.getValueType(); 2834 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2835 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2836 // vmrs are very slow, e.g. cortex-a8. 2837 return false; 2838 2839 if (isFloatingPointZero(Op)) { 2840 SeenZero = true; 2841 return true; 2842 } 2843 return ISD::isNormalLoad(N); 2844} 2845 2846static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2847 if (isFloatingPointZero(Op)) 2848 return DAG.getConstant(0, MVT::i32); 2849 2850 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2851 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2852 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2853 Ld->isVolatile(), Ld->isNonTemporal(), 2854 Ld->getAlignment()); 2855 2856 llvm_unreachable("Unknown VFP cmp argument!"); 2857} 2858 2859static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2860 SDValue &RetVal1, SDValue &RetVal2) { 2861 if (isFloatingPointZero(Op)) { 2862 RetVal1 = DAG.getConstant(0, MVT::i32); 2863 RetVal2 = DAG.getConstant(0, MVT::i32); 2864 return; 2865 } 2866 2867 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2868 SDValue Ptr = Ld->getBasePtr(); 2869 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2870 Ld->getChain(), Ptr, 2871 Ld->getPointerInfo(), 2872 Ld->isVolatile(), Ld->isNonTemporal(), 2873 Ld->getAlignment()); 2874 2875 EVT PtrType = Ptr.getValueType(); 2876 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2877 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2878 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2879 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2880 Ld->getChain(), NewPtr, 2881 Ld->getPointerInfo().getWithOffset(4), 2882 Ld->isVolatile(), Ld->isNonTemporal(), 2883 NewAlign); 2884 return; 2885 } 2886 2887 llvm_unreachable("Unknown VFP cmp argument!"); 2888} 2889 2890/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2891/// f32 and even f64 comparisons to integer ones. 
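/// For example, an f32 equality branch can be rewritten to compare the raw
/// 32-bit patterns of the operands with an integer CMP, and an f64 one can be
/// split into an ARMISD::BCC_i64 over the two 32-bit halves. This is only
/// attempted for equality-style conditions, and only when the operands are
/// single-use loads or +0.0, so no fp-to-integer register moves are needed.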
2892SDValue 2893ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2894 SDValue Chain = Op.getOperand(0); 2895 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2896 SDValue LHS = Op.getOperand(2); 2897 SDValue RHS = Op.getOperand(3); 2898 SDValue Dest = Op.getOperand(4); 2899 DebugLoc dl = Op.getDebugLoc(); 2900 2901 bool SeenZero = false; 2902 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2903 canChangeToInt(RHS, SeenZero, Subtarget) && 2904 // If one of the operand is zero, it's safe to ignore the NaN case since 2905 // we only care about equality comparisons. 2906 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2907 // If unsafe fp math optimization is enabled and there are no other uses of 2908 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2909 // to an integer comparison. 2910 if (CC == ISD::SETOEQ) 2911 CC = ISD::SETEQ; 2912 else if (CC == ISD::SETUNE) 2913 CC = ISD::SETNE; 2914 2915 SDValue ARMcc; 2916 if (LHS.getValueType() == MVT::f32) { 2917 LHS = bitcastf32Toi32(LHS, DAG); 2918 RHS = bitcastf32Toi32(RHS, DAG); 2919 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2920 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2921 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2922 Chain, Dest, ARMcc, CCR, Cmp); 2923 } 2924 2925 SDValue LHS1, LHS2; 2926 SDValue RHS1, RHS2; 2927 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2928 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2929 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2930 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2931 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2932 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2933 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2934 } 2935 2936 return SDValue(); 2937} 2938 2939SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2940 SDValue Chain = Op.getOperand(0); 2941 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2942 SDValue LHS = Op.getOperand(2); 2943 SDValue RHS = Op.getOperand(3); 2944 SDValue Dest = Op.getOperand(4); 2945 DebugLoc dl = Op.getDebugLoc(); 2946 2947 if (LHS.getValueType() == MVT::i32) { 2948 SDValue ARMcc; 2949 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2950 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2951 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2952 Chain, Dest, ARMcc, CCR, Cmp); 2953 } 2954 2955 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2956 2957 if (UnsafeFPMath && 2958 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2959 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2960 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2961 if (Result.getNode()) 2962 return Result; 2963 } 2964 2965 ARMCC::CondCodes CondCode, CondCode2; 2966 FPCCToARMCC(CC, CondCode, CondCode2); 2967 2968 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2969 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2970 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2971 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2972 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2973 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2974 if (CondCode2 != ARMCC::AL) { 2975 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2976 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2977 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2978 } 2979 return Res; 2980} 2981 2982SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2983 SDValue 
Chain = Op.getOperand(0); 2984 SDValue Table = Op.getOperand(1); 2985 SDValue Index = Op.getOperand(2); 2986 DebugLoc dl = Op.getDebugLoc(); 2987 2988 EVT PTy = getPointerTy(); 2989 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2990 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2991 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2992 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2993 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2994 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2995 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2996 if (Subtarget->isThumb2()) { 2997 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2998 // which does another jump to the destination. This also makes it easier 2999 // to translate it to TBB / TBH later. 3000 // FIXME: This might not work if the function is extremely large. 3001 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3002 Addr, Op.getOperand(2), JTI, UId); 3003 } 3004 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3005 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3006 MachinePointerInfo::getJumpTable(), 3007 false, false, 0); 3008 Chain = Addr.getValue(1); 3009 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3010 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3011 } else { 3012 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3013 MachinePointerInfo::getJumpTable(), false, false, 0); 3014 Chain = Addr.getValue(1); 3015 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3016 } 3017} 3018 3019static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3020 DebugLoc dl = Op.getDebugLoc(); 3021 unsigned Opc; 3022 3023 switch (Op.getOpcode()) { 3024 default: 3025 assert(0 && "Invalid opcode!"); 3026 case ISD::FP_TO_SINT: 3027 Opc = ARMISD::FTOSI; 3028 break; 3029 case ISD::FP_TO_UINT: 3030 Opc = ARMISD::FTOUI; 3031 break; 3032 } 3033 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3034 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3035} 3036 3037static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3038 EVT VT = Op.getValueType(); 3039 DebugLoc dl = Op.getDebugLoc(); 3040 3041 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3042 "Invalid type for custom lowering!"); 3043 if (VT != MVT::v4f32) 3044 return DAG.UnrollVectorOp(Op.getNode()); 3045 3046 unsigned CastOpc; 3047 unsigned Opc; 3048 switch (Op.getOpcode()) { 3049 default: 3050 assert(0 && "Invalid opcode!"); 3051 case ISD::SINT_TO_FP: 3052 CastOpc = ISD::SIGN_EXTEND; 3053 Opc = ISD::SINT_TO_FP; 3054 break; 3055 case ISD::UINT_TO_FP: 3056 CastOpc = ISD::ZERO_EXTEND; 3057 Opc = ISD::UINT_TO_FP; 3058 break; 3059 } 3060 3061 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3062 return DAG.getNode(Opc, dl, VT, Op); 3063} 3064 3065static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3066 EVT VT = Op.getValueType(); 3067 if (VT.isVector()) 3068 return LowerVectorINT_TO_FP(Op, DAG); 3069 3070 DebugLoc dl = Op.getDebugLoc(); 3071 unsigned Opc; 3072 3073 switch (Op.getOpcode()) { 3074 default: 3075 assert(0 && "Invalid opcode!"); 3076 case ISD::SINT_TO_FP: 3077 Opc = ARMISD::SITOF; 3078 break; 3079 case ISD::UINT_TO_FP: 3080 Opc = ARMISD::UITOF; 3081 break; 3082 } 3083 3084 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3085 return DAG.getNode(Opc, dl, VT, Op); 3086} 3087 3088SDValue 
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3089 // Implement fcopysign with a fabs and a conditional fneg. 3090 SDValue Tmp0 = Op.getOperand(0); 3091 SDValue Tmp1 = Op.getOperand(1); 3092 DebugLoc dl = Op.getDebugLoc(); 3093 EVT VT = Op.getValueType(); 3094 EVT SrcVT = Tmp1.getValueType(); 3095 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3096 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3097 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3098 3099 if (UseNEON) { 3100 // Use VBSL to copy the sign bit. 3101 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3102 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3103 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3104 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3105 if (VT == MVT::f64) 3106 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3107 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3108 DAG.getConstant(32, MVT::i32)); 3109 else /*if (VT == MVT::f32)*/ 3110 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3111 if (SrcVT == MVT::f32) { 3112 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3113 if (VT == MVT::f64) 3114 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3115 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3116 DAG.getConstant(32, MVT::i32)); 3117 } else if (VT == MVT::f32) 3118 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3119 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3120 DAG.getConstant(32, MVT::i32)); 3121 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3122 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3123 3124 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3125 MVT::i32); 3126 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3127 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3128 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3129 3130 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3131 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3132 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3133 if (VT == MVT::f32) { 3134 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3135 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3136 DAG.getConstant(0, MVT::i32)); 3137 } else { 3138 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3139 } 3140 3141 return Res; 3142 } 3143 3144 // Bitcast operand 1 to i32. 3145 if (SrcVT == MVT::f64) 3146 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3147 &Tmp1, 1).getValue(1); 3148 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3149 3150 // Or in the signbit with integer operations. 3151 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3152 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3153 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3154 if (VT == MVT::f32) { 3155 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3156 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3157 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3158 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3159 } 3160 3161 // f64: Or the high part with signbit and then combine two parts. 
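  // That is: split the f64 into (Lo, Hi) with VMOVRRD, clear Hi's sign bit,
  // OR in the sign bit extracted from operand 1, and rebuild the result with
  // VMOVDRR(Lo, Hi).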
3162 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3163 &Tmp0, 1); 3164 SDValue Lo = Tmp0.getValue(0); 3165 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3166 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3167 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3168} 3169 3170SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3171 MachineFunction &MF = DAG.getMachineFunction(); 3172 MachineFrameInfo *MFI = MF.getFrameInfo(); 3173 MFI->setReturnAddressIsTaken(true); 3174 3175 EVT VT = Op.getValueType(); 3176 DebugLoc dl = Op.getDebugLoc(); 3177 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3178 if (Depth) { 3179 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3180 SDValue Offset = DAG.getConstant(4, MVT::i32); 3181 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3182 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3183 MachinePointerInfo(), false, false, 0); 3184 } 3185 3186 // Return LR, which contains the return address. Mark it an implicit live-in. 3187 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3188 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3189} 3190 3191SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3192 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3193 MFI->setFrameAddressIsTaken(true); 3194 3195 EVT VT = Op.getValueType(); 3196 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3197 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3198 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3199 ? ARM::R7 : ARM::R11; 3200 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3201 while (Depth--) 3202 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3203 MachinePointerInfo(), 3204 false, false, 0); 3205 return FrameAddr; 3206} 3207 3208/// ExpandBITCAST - If the target supports VFP, this function is called to 3209/// expand a bit convert where either the source or destination type is i64 to 3210/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3211/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3212/// vectors), since the legalizer won't know what to do with that. 3213static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3214 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3215 DebugLoc dl = N->getDebugLoc(); 3216 SDValue Op = N->getOperand(0); 3217 3218 // This function is only supposed to be called for i64 types, either as the 3219 // source or destination of the bit convert. 3220 EVT SrcVT = Op.getValueType(); 3221 EVT DstVT = N->getValueType(0); 3222 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3223 "ExpandBITCAST called for non-i64 type"); 3224 3225 // Turn i64->f64 into VMOVDRR. 3226 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3227 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3228 DAG.getConstant(0, MVT::i32)); 3229 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3230 DAG.getConstant(1, MVT::i32)); 3231 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3232 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3233 } 3234 3235 // Turn f64->i64 into VMOVRRD. 3236 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3237 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3238 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3239 // Merge the pieces into a single i64 value. 
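    // (ISD::BUILD_PAIR takes the low half as its first operand and the high
    // half as its second.)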
3240 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3241 } 3242 3243 return SDValue(); 3244} 3245 3246/// getZeroVector - Returns a vector of specified type with all zero elements. 3247/// Zero vectors are used to represent vector negation and in those cases 3248/// will be implemented with the NEON VNEG instruction. However, VNEG does 3249/// not support i64 elements, so sometimes the zero vectors will need to be 3250/// explicitly constructed. Regardless, use a canonical VMOV to create the 3251/// zero vector. 3252static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3253 assert(VT.isVector() && "Expected a vector type"); 3254 // The canonical modified immediate encoding of a zero vector is....0! 3255 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3256 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3257 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3258 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3259} 3260 3261/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3262/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3263SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3264 SelectionDAG &DAG) const { 3265 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3266 EVT VT = Op.getValueType(); 3267 unsigned VTBits = VT.getSizeInBits(); 3268 DebugLoc dl = Op.getDebugLoc(); 3269 SDValue ShOpLo = Op.getOperand(0); 3270 SDValue ShOpHi = Op.getOperand(1); 3271 SDValue ShAmt = Op.getOperand(2); 3272 SDValue ARMcc; 3273 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3274 3275 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3276 3277 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3278 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3279 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3280 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3281 DAG.getConstant(VTBits, MVT::i32)); 3282 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3283 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3284 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3285 3286 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3287 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3288 ARMcc, DAG, dl); 3289 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3290 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3291 CCR, Cmp); 3292 3293 SDValue Ops[2] = { Lo, Hi }; 3294 return DAG.getMergeValues(Ops, 2, dl); 3295} 3296 3297/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3298/// i32 values and take a 2 x i32 value to shift plus a shift amount. 
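/// The low result is simply ShOpLo << ShAmt. The high result is
/// (ShOpHi << ShAmt) | (ShOpLo >> (VTBits - ShAmt)) when ShAmt < VTBits, and
/// ShOpLo << (ShAmt - VTBits) otherwise, selected with a CMOV on the sign of
/// ShAmt - VTBits.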
3299SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3300 SelectionDAG &DAG) const { 3301 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3302 EVT VT = Op.getValueType(); 3303 unsigned VTBits = VT.getSizeInBits(); 3304 DebugLoc dl = Op.getDebugLoc(); 3305 SDValue ShOpLo = Op.getOperand(0); 3306 SDValue ShOpHi = Op.getOperand(1); 3307 SDValue ShAmt = Op.getOperand(2); 3308 SDValue ARMcc; 3309 3310 assert(Op.getOpcode() == ISD::SHL_PARTS); 3311 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3312 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3313 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3314 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3315 DAG.getConstant(VTBits, MVT::i32)); 3316 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3317 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3318 3319 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3320 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3321 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3322 ARMcc, DAG, dl); 3323 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3324 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3325 CCR, Cmp); 3326 3327 SDValue Ops[2] = { Lo, Hi }; 3328 return DAG.getMergeValues(Ops, 2, dl); 3329} 3330 3331SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3332 SelectionDAG &DAG) const { 3333 // The rounding mode is in bits 23:22 of the FPSCR. 3334 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3335 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3336 // so that the shift + and get folded into a bitfield extract. 3337 DebugLoc dl = Op.getDebugLoc(); 3338 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3339 DAG.getConstant(Intrinsic::arm_get_fpscr, 3340 MVT::i32)); 3341 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3342 DAG.getConstant(1U << 22, MVT::i32)); 3343 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3344 DAG.getConstant(22, MVT::i32)); 3345 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3346 DAG.getConstant(3, MVT::i32)); 3347} 3348 3349static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3350 const ARMSubtarget *ST) { 3351 EVT VT = N->getValueType(0); 3352 DebugLoc dl = N->getDebugLoc(); 3353 3354 if (!ST->hasV6T2Ops()) 3355 return SDValue(); 3356 3357 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3358 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3359} 3360 3361static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3362 const ARMSubtarget *ST) { 3363 EVT VT = N->getValueType(0); 3364 DebugLoc dl = N->getDebugLoc(); 3365 3366 if (!VT.isVector()) 3367 return SDValue(); 3368 3369 // Lower vector shifts on NEON to use VSHL. 3370 assert(ST->hasNEON() && "unexpected vector shift"); 3371 3372 // Left shifts translate directly to the vshiftu intrinsic. 3373 if (N->getOpcode() == ISD::SHL) 3374 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3375 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3376 N->getOperand(0), N->getOperand(1)); 3377 3378 assert((N->getOpcode() == ISD::SRA || 3379 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3380 3381 // NEON uses the same intrinsics for both left and right shifts. For 3382 // right shifts, the shift amounts are negative, so negate the vector of 3383 // shift amounts. 
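  // For example, (srl <4 x i32> %v, <1, 2, 3, 4>) becomes a vshiftu call with
  // shift counts <-1, -2, -3, -4>.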
3384 EVT ShiftVT = N->getOperand(1).getValueType(); 3385 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3386 getZeroVector(ShiftVT, DAG, dl), 3387 N->getOperand(1)); 3388 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3389 Intrinsic::arm_neon_vshifts : 3390 Intrinsic::arm_neon_vshiftu); 3391 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3392 DAG.getConstant(vshiftInt, MVT::i32), 3393 N->getOperand(0), NegatedCount); 3394} 3395 3396static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3397 const ARMSubtarget *ST) { 3398 EVT VT = N->getValueType(0); 3399 DebugLoc dl = N->getDebugLoc(); 3400 3401 // We can get here for a node like i32 = ISD::SHL i32, i64 3402 if (VT != MVT::i64) 3403 return SDValue(); 3404 3405 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3406 "Unknown shift to lower!"); 3407 3408 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3409 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3410 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3411 return SDValue(); 3412 3413 // If we are in thumb mode, we don't have RRX. 3414 if (ST->isThumb1Only()) return SDValue(); 3415 3416 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 3417 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3418 DAG.getConstant(0, MVT::i32)); 3419 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3420 DAG.getConstant(1, MVT::i32)); 3421 3422 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3423 // captures the result into a carry flag. 3424 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3425 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3426 3427 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3428 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3429 3430 // Merge the pieces into a single i64 value. 3431 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3432} 3433 3434static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3435 SDValue TmpOp0, TmpOp1; 3436 bool Invert = false; 3437 bool Swap = false; 3438 unsigned Opc = 0; 3439 3440 SDValue Op0 = Op.getOperand(0); 3441 SDValue Op1 = Op.getOperand(1); 3442 SDValue CC = Op.getOperand(2); 3443 EVT VT = Op.getValueType(); 3444 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3445 DebugLoc dl = Op.getDebugLoc(); 3446 3447 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3448 switch (SetCCOpcode) { 3449 default: llvm_unreachable("Illegal FP comparison"); break; 3450 case ISD::SETUNE: 3451 case ISD::SETNE: Invert = true; // Fallthrough 3452 case ISD::SETOEQ: 3453 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3454 case ISD::SETOLT: 3455 case ISD::SETLT: Swap = true; // Fallthrough 3456 case ISD::SETOGT: 3457 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3458 case ISD::SETOLE: 3459 case ISD::SETLE: Swap = true; // Fallthrough 3460 case ISD::SETOGE: 3461 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3462 case ISD::SETUGE: Swap = true; // Fallthrough 3463 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3464 case ISD::SETUGT: Swap = true; // Fallthrough 3465 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3466 case ISD::SETUEQ: Invert = true; // Fallthrough 3467 case ISD::SETONE: 3468 // Expand this to (OLT | OGT). 
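      // For SETUEQ, the Invert flag set above applies a trailing NOT, turning
      // (OLT | OGT) into the unordered-or-equal result.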
3469 TmpOp0 = Op0; 3470 TmpOp1 = Op1; 3471 Opc = ISD::OR; 3472 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3473 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3474 break; 3475 case ISD::SETUO: Invert = true; // Fallthrough 3476 case ISD::SETO: 3477 // Expand this to (OLT | OGE). 3478 TmpOp0 = Op0; 3479 TmpOp1 = Op1; 3480 Opc = ISD::OR; 3481 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3482 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3483 break; 3484 } 3485 } else { 3486 // Integer comparisons. 3487 switch (SetCCOpcode) { 3488 default: llvm_unreachable("Illegal integer comparison"); break; 3489 case ISD::SETNE: Invert = true; 3490 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3491 case ISD::SETLT: Swap = true; 3492 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3493 case ISD::SETLE: Swap = true; 3494 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3495 case ISD::SETULT: Swap = true; 3496 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3497 case ISD::SETULE: Swap = true; 3498 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3499 } 3500 3501 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3502 if (Opc == ARMISD::VCEQ) { 3503 3504 SDValue AndOp; 3505 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3506 AndOp = Op0; 3507 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3508 AndOp = Op1; 3509 3510 // Ignore bitconvert. 3511 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3512 AndOp = AndOp.getOperand(0); 3513 3514 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3515 Opc = ARMISD::VTST; 3516 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3517 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3518 Invert = !Invert; 3519 } 3520 } 3521 } 3522 3523 if (Swap) 3524 std::swap(Op0, Op1); 3525 3526 // If one of the operands is a constant vector zero, attempt to fold the 3527 // comparison to a specialized compare-against-zero form. 3528 SDValue SingleOp; 3529 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3530 SingleOp = Op0; 3531 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3532 if (Opc == ARMISD::VCGE) 3533 Opc = ARMISD::VCLEZ; 3534 else if (Opc == ARMISD::VCGT) 3535 Opc = ARMISD::VCLTZ; 3536 SingleOp = Op1; 3537 } 3538 3539 SDValue Result; 3540 if (SingleOp.getNode()) { 3541 switch (Opc) { 3542 case ARMISD::VCEQ: 3543 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3544 case ARMISD::VCGE: 3545 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3546 case ARMISD::VCLEZ: 3547 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3548 case ARMISD::VCGT: 3549 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3550 case ARMISD::VCLTZ: 3551 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3552 default: 3553 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3554 } 3555 } else { 3556 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3557 } 3558 3559 if (Invert) 3560 Result = DAG.getNOT(dl, Result, VT); 3561 3562 return Result; 3563} 3564 3565/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3566/// valid vector constant for a NEON instruction with a "modified immediate" 3567/// operand (e.g., VMOV). If so, return the encoded value. 
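/// For example, a 32-bit splat of 0x0000nn00 is encoded with Cmode=001x and
/// Imm=nn (the byte-shifted form handled below), and an all-zero splat is
/// always encoded via the 32-bit form.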
3568static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3569 unsigned SplatBitSize, SelectionDAG &DAG, 3570 EVT &VT, bool is128Bits, NEONModImmType type) { 3571 unsigned OpCmode, Imm; 3572 3573 // SplatBitSize is set to the smallest size that splats the vector, so a 3574 // zero vector will always have SplatBitSize == 8. However, NEON modified 3575 // immediate instructions others than VMOV do not support the 8-bit encoding 3576 // of a zero vector, and the default encoding of zero is supposed to be the 3577 // 32-bit version. 3578 if (SplatBits == 0) 3579 SplatBitSize = 32; 3580 3581 switch (SplatBitSize) { 3582 case 8: 3583 if (type != VMOVModImm) 3584 return SDValue(); 3585 // Any 1-byte value is OK. Op=0, Cmode=1110. 3586 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3587 OpCmode = 0xe; 3588 Imm = SplatBits; 3589 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3590 break; 3591 3592 case 16: 3593 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3594 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3595 if ((SplatBits & ~0xff) == 0) { 3596 // Value = 0x00nn: Op=x, Cmode=100x. 3597 OpCmode = 0x8; 3598 Imm = SplatBits; 3599 break; 3600 } 3601 if ((SplatBits & ~0xff00) == 0) { 3602 // Value = 0xnn00: Op=x, Cmode=101x. 3603 OpCmode = 0xa; 3604 Imm = SplatBits >> 8; 3605 break; 3606 } 3607 return SDValue(); 3608 3609 case 32: 3610 // NEON's 32-bit VMOV supports splat values where: 3611 // * only one byte is nonzero, or 3612 // * the least significant byte is 0xff and the second byte is nonzero, or 3613 // * the least significant 2 bytes are 0xff and the third is nonzero. 3614 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3615 if ((SplatBits & ~0xff) == 0) { 3616 // Value = 0x000000nn: Op=x, Cmode=000x. 3617 OpCmode = 0; 3618 Imm = SplatBits; 3619 break; 3620 } 3621 if ((SplatBits & ~0xff00) == 0) { 3622 // Value = 0x0000nn00: Op=x, Cmode=001x. 3623 OpCmode = 0x2; 3624 Imm = SplatBits >> 8; 3625 break; 3626 } 3627 if ((SplatBits & ~0xff0000) == 0) { 3628 // Value = 0x00nn0000: Op=x, Cmode=010x. 3629 OpCmode = 0x4; 3630 Imm = SplatBits >> 16; 3631 break; 3632 } 3633 if ((SplatBits & ~0xff000000) == 0) { 3634 // Value = 0xnn000000: Op=x, Cmode=011x. 3635 OpCmode = 0x6; 3636 Imm = SplatBits >> 24; 3637 break; 3638 } 3639 3640 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3641 if (type == OtherModImm) return SDValue(); 3642 3643 if ((SplatBits & ~0xffff) == 0 && 3644 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3645 // Value = 0x0000nnff: Op=x, Cmode=1100. 3646 OpCmode = 0xc; 3647 Imm = SplatBits >> 8; 3648 SplatBits |= 0xff; 3649 break; 3650 } 3651 3652 if ((SplatBits & ~0xffffff) == 0 && 3653 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3654 // Value = 0x00nnffff: Op=x, Cmode=1101. 3655 OpCmode = 0xd; 3656 Imm = SplatBits >> 16; 3657 SplatBits |= 0xffff; 3658 break; 3659 } 3660 3661 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3662 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3663 // VMOV.I32. A (very) minor optimization would be to replicate the value 3664 // and fall through here to test for a valid 64-bit splat. But, then the 3665 // caller would also need to check and handle the change in size. 3666 return SDValue(); 3667 3668 case 64: { 3669 if (type != VMOVModImm) 3670 return SDValue(); 3671 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 
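    // For example, 0x00ff00ff00ff00ff qualifies: bytes 0, 2, 4 and 6 are 0xff,
    // so the immediate becomes 0b01010101.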
3672 uint64_t BitMask = 0xff; 3673 uint64_t Val = 0; 3674 unsigned ImmMask = 1; 3675 Imm = 0; 3676 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3677 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3678 Val |= BitMask; 3679 Imm |= ImmMask; 3680 } else if ((SplatBits & BitMask) != 0) { 3681 return SDValue(); 3682 } 3683 BitMask <<= 8; 3684 ImmMask <<= 1; 3685 } 3686 // Op=1, Cmode=1110. 3687 OpCmode = 0x1e; 3688 SplatBits = Val; 3689 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3690 break; 3691 } 3692 3693 default: 3694 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3695 return SDValue(); 3696 } 3697 3698 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3699 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3700} 3701 3702static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3703 bool &ReverseVEXT, unsigned &Imm) { 3704 unsigned NumElts = VT.getVectorNumElements(); 3705 ReverseVEXT = false; 3706 3707 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3708 if (M[0] < 0) 3709 return false; 3710 3711 Imm = M[0]; 3712 3713 // If this is a VEXT shuffle, the immediate value is the index of the first 3714 // element. The other shuffle indices must be the successive elements after 3715 // the first one. 3716 unsigned ExpectedElt = Imm; 3717 for (unsigned i = 1; i < NumElts; ++i) { 3718 // Increment the expected index. If it wraps around, it may still be 3719 // a VEXT but the source vectors must be swapped. 3720 ExpectedElt += 1; 3721 if (ExpectedElt == NumElts * 2) { 3722 ExpectedElt = 0; 3723 ReverseVEXT = true; 3724 } 3725 3726 if (M[i] < 0) continue; // ignore UNDEF indices 3727 if (ExpectedElt != static_cast<unsigned>(M[i])) 3728 return false; 3729 } 3730 3731 // Adjust the index value if the source operands will be swapped. 3732 if (ReverseVEXT) 3733 Imm -= NumElts; 3734 3735 return true; 3736} 3737 3738/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3739/// instruction with the specified blocksize. (The order of the elements 3740/// within each block of the vector is reversed.) 3741static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3742 unsigned BlockSize) { 3743 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3744 "Only possible block sizes for VREV are: 16, 32, 64"); 3745 3746 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3747 if (EltSz == 64) 3748 return false; 3749 3750 unsigned NumElts = VT.getVectorNumElements(); 3751 unsigned BlockElts = M[0] + 1; 3752 // If the first shuffle index is UNDEF, be optimistic. 3753 if (M[0] < 0) 3754 BlockElts = BlockSize / EltSz; 3755 3756 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3757 return false; 3758 3759 for (unsigned i = 0; i < NumElts; ++i) { 3760 if (M[i] < 0) continue; // ignore UNDEF indices 3761 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3762 return false; 3763 } 3764 3765 return true; 3766} 3767 3768static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3769 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3770 // range, then 0 is placed into the resulting vector. So pretty much any mask 3771 // of 8 elements can work here. 
3772 return VT == MVT::v8i8 && M.size() == 8; 3773} 3774 3775static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3776 unsigned &WhichResult) { 3777 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3778 if (EltSz == 64) 3779 return false; 3780 3781 unsigned NumElts = VT.getVectorNumElements(); 3782 WhichResult = (M[0] == 0 ? 0 : 1); 3783 for (unsigned i = 0; i < NumElts; i += 2) { 3784 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3785 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3786 return false; 3787 } 3788 return true; 3789} 3790 3791/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3792/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3793/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3794static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3795 unsigned &WhichResult) { 3796 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3797 if (EltSz == 64) 3798 return false; 3799 3800 unsigned NumElts = VT.getVectorNumElements(); 3801 WhichResult = (M[0] == 0 ? 0 : 1); 3802 for (unsigned i = 0; i < NumElts; i += 2) { 3803 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3804 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3805 return false; 3806 } 3807 return true; 3808} 3809 3810static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3811 unsigned &WhichResult) { 3812 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3813 if (EltSz == 64) 3814 return false; 3815 3816 unsigned NumElts = VT.getVectorNumElements(); 3817 WhichResult = (M[0] == 0 ? 0 : 1); 3818 for (unsigned i = 0; i != NumElts; ++i) { 3819 if (M[i] < 0) continue; // ignore UNDEF indices 3820 if ((unsigned) M[i] != 2 * i + WhichResult) 3821 return false; 3822 } 3823 3824 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3825 if (VT.is64BitVector() && EltSz == 32) 3826 return false; 3827 3828 return true; 3829} 3830 3831/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3832/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3833/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3834static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3835 unsigned &WhichResult) { 3836 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3837 if (EltSz == 64) 3838 return false; 3839 3840 unsigned Half = VT.getVectorNumElements() / 2; 3841 WhichResult = (M[0] == 0 ? 0 : 1); 3842 for (unsigned j = 0; j != 2; ++j) { 3843 unsigned Idx = WhichResult; 3844 for (unsigned i = 0; i != Half; ++i) { 3845 int MIdx = M[i + j * Half]; 3846 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3847 return false; 3848 Idx += 2; 3849 } 3850 } 3851 3852 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3853 if (VT.is64BitVector() && EltSz == 32) 3854 return false; 3855 3856 return true; 3857} 3858 3859static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3860 unsigned &WhichResult) { 3861 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3862 if (EltSz == 64) 3863 return false; 3864 3865 unsigned NumElts = VT.getVectorNumElements(); 3866 WhichResult = (M[0] == 0 ? 0 : 1); 3867 unsigned Idx = WhichResult * NumElts / 2; 3868 for (unsigned i = 0; i != NumElts; i += 2) { 3869 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3870 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3871 return false; 3872 Idx += 1; 3873 } 3874 3875 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
3876 if (VT.is64BitVector() && EltSz == 32) 3877 return false; 3878 3879 return true; 3880} 3881 3882/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3883/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3884/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3885static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3886 unsigned &WhichResult) { 3887 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3888 if (EltSz == 64) 3889 return false; 3890 3891 unsigned NumElts = VT.getVectorNumElements(); 3892 WhichResult = (M[0] == 0 ? 0 : 1); 3893 unsigned Idx = WhichResult * NumElts / 2; 3894 for (unsigned i = 0; i != NumElts; i += 2) { 3895 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3896 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3897 return false; 3898 Idx += 1; 3899 } 3900 3901 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3902 if (VT.is64BitVector() && EltSz == 32) 3903 return false; 3904 3905 return true; 3906} 3907 3908// If N is an integer constant that can be moved into a register in one 3909// instruction, return an SDValue of such a constant (will become a MOV 3910// instruction). Otherwise return null. 3911static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3912 const ARMSubtarget *ST, DebugLoc dl) { 3913 uint64_t Val; 3914 if (!isa<ConstantSDNode>(N)) 3915 return SDValue(); 3916 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3917 3918 if (ST->isThumb1Only()) { 3919 if (Val <= 255 || ~Val <= 255) 3920 return DAG.getConstant(Val, MVT::i32); 3921 } else { 3922 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3923 return DAG.getConstant(Val, MVT::i32); 3924 } 3925 return SDValue(); 3926} 3927 3928// If this is a case we can't handle, return null and let the default 3929// expansion code take care of it. 3930SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3931 const ARMSubtarget *ST) const { 3932 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3933 DebugLoc dl = Op.getDebugLoc(); 3934 EVT VT = Op.getValueType(); 3935 3936 APInt SplatBits, SplatUndef; 3937 unsigned SplatBitSize; 3938 bool HasAnyUndefs; 3939 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3940 if (SplatBitSize <= 64) { 3941 // Check if an immediate VMOV works. 3942 EVT VmovVT; 3943 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3944 SplatUndef.getZExtValue(), SplatBitSize, 3945 DAG, VmovVT, VT.is128BitVector(), 3946 VMOVModImm); 3947 if (Val.getNode()) { 3948 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3949 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3950 } 3951 3952 // Try an immediate VMVN. 3953 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3954 Val = isNEONModifiedImm(NegatedImm, 3955 SplatUndef.getZExtValue(), SplatBitSize, 3956 DAG, VmovVT, VT.is128BitVector(), 3957 VMVNModImm); 3958 if (Val.getNode()) { 3959 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3960 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3961 } 3962 } 3963 } 3964 3965 // Scan through the operands to see if only one value is used. 
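  // The flags gathered here drive the choices below: a vector that uses only
  // one value becomes a VDUP (directly, or of a single-instruction constant),
  // a value only in lane 0 becomes SCALAR_TO_VECTOR, and an all-constant
  // vector falls back to the default constant-pool expansion.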
3966 unsigned NumElts = VT.getVectorNumElements(); 3967 bool isOnlyLowElement = true; 3968 bool usesOnlyOneValue = true; 3969 bool isConstant = true; 3970 SDValue Value; 3971 for (unsigned i = 0; i < NumElts; ++i) { 3972 SDValue V = Op.getOperand(i); 3973 if (V.getOpcode() == ISD::UNDEF) 3974 continue; 3975 if (i > 0) 3976 isOnlyLowElement = false; 3977 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3978 isConstant = false; 3979 3980 if (!Value.getNode()) 3981 Value = V; 3982 else if (V != Value) 3983 usesOnlyOneValue = false; 3984 } 3985 3986 if (!Value.getNode()) 3987 return DAG.getUNDEF(VT); 3988 3989 if (isOnlyLowElement) 3990 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3991 3992 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3993 3994 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3995 // i32 and try again. 3996 if (usesOnlyOneValue && EltSize <= 32) { 3997 if (!isConstant) 3998 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3999 if (VT.getVectorElementType().isFloatingPoint()) { 4000 SmallVector<SDValue, 8> Ops; 4001 for (unsigned i = 0; i < NumElts; ++i) 4002 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4003 Op.getOperand(i))); 4004 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4005 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4006 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4007 if (Val.getNode()) 4008 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4009 } 4010 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4011 if (Val.getNode()) 4012 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4013 } 4014 4015 // If all elements are constants and the case above didn't get hit, fall back 4016 // to the default expansion, which will generate a load from the constant 4017 // pool. 4018 if (isConstant) 4019 return SDValue(); 4020 4021 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4022 if (NumElts >= 4) { 4023 SDValue shuffle = ReconstructShuffle(Op, DAG); 4024 if (shuffle != SDValue()) 4025 return shuffle; 4026 } 4027 4028 // Vectors with 32- or 64-bit elements can be built by directly assigning 4029 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4030 // will be legalized. 4031 if (EltSize >= 32) { 4032 // Do the expansion with floating-point types, since that is what the VFP 4033 // registers are defined to use, and since i64 is not legal. 4034 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4035 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4036 SmallVector<SDValue, 8> Ops; 4037 for (unsigned i = 0; i < NumElts; ++i) 4038 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4039 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4040 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4041 } 4042 4043 return SDValue(); 4044} 4045 4046// Gather data to see if the operation can be modelled as a 4047// shuffle in combination with VEXTs. 
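// The approach: record every distinct source vector feeding the
// EXTRACT_VECTOR_ELT operands together with the minimum and maximum lane
// extracted from it; if at most two sources are involved, each one is narrowed
// to the result width with EXTRACT_SUBVECTOR or a VEXT, and the whole
// BUILD_VECTOR is then re-expressed as a single two-input shuffle mask.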
4048SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4049 SelectionDAG &DAG) const { 4050 DebugLoc dl = Op.getDebugLoc(); 4051 EVT VT = Op.getValueType(); 4052 unsigned NumElts = VT.getVectorNumElements(); 4053 4054 SmallVector<SDValue, 2> SourceVecs; 4055 SmallVector<unsigned, 2> MinElts; 4056 SmallVector<unsigned, 2> MaxElts; 4057 4058 for (unsigned i = 0; i < NumElts; ++i) { 4059 SDValue V = Op.getOperand(i); 4060 if (V.getOpcode() == ISD::UNDEF) 4061 continue; 4062 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4063 // A shuffle can only come from building a vector from various 4064 // elements of other vectors. 4065 return SDValue(); 4066 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4067 VT.getVectorElementType()) { 4068 // This code doesn't know how to handle shuffles where the vector 4069 // element types do not match (this happens because type legalization 4070 // promotes the return type of EXTRACT_VECTOR_ELT). 4071 // FIXME: It might be appropriate to extend this code to handle 4072 // mismatched types. 4073 return SDValue(); 4074 } 4075 4076 // Record this extraction against the appropriate vector if possible... 4077 SDValue SourceVec = V.getOperand(0); 4078 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4079 bool FoundSource = false; 4080 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4081 if (SourceVecs[j] == SourceVec) { 4082 if (MinElts[j] > EltNo) 4083 MinElts[j] = EltNo; 4084 if (MaxElts[j] < EltNo) 4085 MaxElts[j] = EltNo; 4086 FoundSource = true; 4087 break; 4088 } 4089 } 4090 4091 // Or record a new source if not... 4092 if (!FoundSource) { 4093 SourceVecs.push_back(SourceVec); 4094 MinElts.push_back(EltNo); 4095 MaxElts.push_back(EltNo); 4096 } 4097 } 4098 4099 // Currently only do something sane when at most two source vectors 4100 // involved. 4101 if (SourceVecs.size() > 2) 4102 return SDValue(); 4103 4104 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4105 int VEXTOffsets[2] = {0, 0}; 4106 4107 // This loop extracts the usage patterns of the source vectors 4108 // and prepares appropriate SDValues for a shuffle if possible. 4109 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4110 if (SourceVecs[i].getValueType() == VT) { 4111 // No VEXT necessary 4112 ShuffleSrcs[i] = SourceVecs[i]; 4113 VEXTOffsets[i] = 0; 4114 continue; 4115 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4116 // It probably isn't worth padding out a smaller vector just to 4117 // break it down again in a shuffle. 4118 return SDValue(); 4119 } 4120 4121 // Since only 64-bit and 128-bit vectors are legal on ARM and 4122 // we've eliminated the other cases... 
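    // ...each remaining source must have exactly twice as many elements as the
    // result vector, which is what the assert below checks.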
4123 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4124 "unexpected vector sizes in ReconstructShuffle"); 4125 4126 if (MaxElts[i] - MinElts[i] >= NumElts) { 4127 // Span too large for a VEXT to cope 4128 return SDValue(); 4129 } 4130 4131 if (MinElts[i] >= NumElts) { 4132 // The extraction can just take the second half 4133 VEXTOffsets[i] = NumElts; 4134 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4135 SourceVecs[i], 4136 DAG.getIntPtrConstant(NumElts)); 4137 } else if (MaxElts[i] < NumElts) { 4138 // The extraction can just take the first half 4139 VEXTOffsets[i] = 0; 4140 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4141 SourceVecs[i], 4142 DAG.getIntPtrConstant(0)); 4143 } else { 4144 // An actual VEXT is needed 4145 VEXTOffsets[i] = MinElts[i]; 4146 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4147 SourceVecs[i], 4148 DAG.getIntPtrConstant(0)); 4149 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4150 SourceVecs[i], 4151 DAG.getIntPtrConstant(NumElts)); 4152 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4153 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4154 } 4155 } 4156 4157 SmallVector<int, 8> Mask; 4158 4159 for (unsigned i = 0; i < NumElts; ++i) { 4160 SDValue Entry = Op.getOperand(i); 4161 if (Entry.getOpcode() == ISD::UNDEF) { 4162 Mask.push_back(-1); 4163 continue; 4164 } 4165 4166 SDValue ExtractVec = Entry.getOperand(0); 4167 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4168 .getOperand(1))->getSExtValue(); 4169 if (ExtractVec == SourceVecs[0]) { 4170 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4171 } else { 4172 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4173 } 4174 } 4175 4176 // Final check before we try to produce nonsense... 4177 if (isShuffleMaskLegal(Mask, VT)) 4178 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4179 &Mask[0]); 4180 4181 return SDValue(); 4182} 4183 4184/// isShuffleMaskLegal - Targets can use this to indicate that they only 4185/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4186/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4187/// are assumed to be legal. 4188bool 4189ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4190 EVT VT) const { 4191 if (VT.getVectorNumElements() == 4 && 4192 (VT.is128BitVector() || VT.is64BitVector())) { 4193 unsigned PFIndexes[4]; 4194 for (unsigned i = 0; i != 4; ++i) { 4195 if (M[i] < 0) 4196 PFIndexes[i] = 8; 4197 else 4198 PFIndexes[i] = M[i]; 4199 } 4200 4201 // Compute the index in the perfect shuffle table. 
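    // Each index is a base-9 digit: lane numbers 0-7, with 8 standing for an
    // undef mask element.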
4202 unsigned PFTableIndex = 4203 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4204 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4205 unsigned Cost = (PFEntry >> 30); 4206 4207 if (Cost <= 4) 4208 return true; 4209 } 4210 4211 bool ReverseVEXT; 4212 unsigned Imm, WhichResult; 4213 4214 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4215 return (EltSize >= 32 || 4216 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4217 isVREVMask(M, VT, 64) || 4218 isVREVMask(M, VT, 32) || 4219 isVREVMask(M, VT, 16) || 4220 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4221 isVTBLMask(M, VT) || 4222 isVTRNMask(M, VT, WhichResult) || 4223 isVUZPMask(M, VT, WhichResult) || 4224 isVZIPMask(M, VT, WhichResult) || 4225 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4226 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4227 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4228} 4229 4230/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4231/// the specified operations to build the shuffle. 4232static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4233 SDValue RHS, SelectionDAG &DAG, 4234 DebugLoc dl) { 4235 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4236 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4237 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4238 4239 enum { 4240 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4241 OP_VREV, 4242 OP_VDUP0, 4243 OP_VDUP1, 4244 OP_VDUP2, 4245 OP_VDUP3, 4246 OP_VEXT1, 4247 OP_VEXT2, 4248 OP_VEXT3, 4249 OP_VUZPL, // VUZP, left result 4250 OP_VUZPR, // VUZP, right result 4251 OP_VZIPL, // VZIP, left result 4252 OP_VZIPR, // VZIP, right result 4253 OP_VTRNL, // VTRN, left result 4254 OP_VTRNR // VTRN, right result 4255 }; 4256 4257 if (OpNum == OP_COPY) { 4258 if (LHSID == (1*9+2)*9+3) return LHS; 4259 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4260 return RHS; 4261 } 4262 4263 SDValue OpLHS, OpRHS; 4264 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4265 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4266 EVT VT = OpLHS.getValueType(); 4267 4268 switch (OpNum) { 4269 default: llvm_unreachable("Unknown shuffle opcode!"); 4270 case OP_VREV: 4271 // VREV divides the vector in half and swaps within the half. 
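    // For a vector of 32-bit elements this is a VREV64, e.g. lanes <0,1,2,3>
    // become <1,0,3,2>.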
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       SmallVectorImpl<int> &ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc DL = Op.getDebugLoc();

  SmallVector<SDValue, 8> VTBLMask;
  for (SmallVectorImpl<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));

  if (V2.getNode()->getOpcode() == ISD::UNDEF)
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                   &VTBLMask[0], 8));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
                                 &VTBLMask[0], 8));
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  SmallVector<int, 8> ShuffleMask;

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
  SVN->getMask(ShuffleMask);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize <= 32) {
    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
      int Lane = SVN->getSplatIndex();
      // If this is an undef splat, generate it via "just" vdup, if possible.
4352 if (Lane == -1) Lane = 0; 4353 4354 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4355 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4356 } 4357 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4358 DAG.getConstant(Lane, MVT::i32)); 4359 } 4360 4361 bool ReverseVEXT; 4362 unsigned Imm; 4363 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4364 if (ReverseVEXT) 4365 std::swap(V1, V2); 4366 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4367 DAG.getConstant(Imm, MVT::i32)); 4368 } 4369 4370 if (isVREVMask(ShuffleMask, VT, 64)) 4371 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4372 if (isVREVMask(ShuffleMask, VT, 32)) 4373 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4374 if (isVREVMask(ShuffleMask, VT, 16)) 4375 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4376 4377 // Check for Neon shuffles that modify both input vectors in place. 4378 // If both results are used, i.e., if there are two shuffles with the same 4379 // source operands and with masks corresponding to both results of one of 4380 // these operations, DAG memoization will ensure that a single node is 4381 // used for both shuffles. 4382 unsigned WhichResult; 4383 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4384 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4385 V1, V2).getValue(WhichResult); 4386 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4387 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4388 V1, V2).getValue(WhichResult); 4389 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4390 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4391 V1, V2).getValue(WhichResult); 4392 4393 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4394 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4395 V1, V1).getValue(WhichResult); 4396 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4397 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4398 V1, V1).getValue(WhichResult); 4399 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4400 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4401 V1, V1).getValue(WhichResult); 4402 } 4403 4404 // If the shuffle is not directly supported and it has 4 elements, use 4405 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4406 unsigned NumElts = VT.getVectorNumElements(); 4407 if (NumElts == 4) { 4408 unsigned PFIndexes[4]; 4409 for (unsigned i = 0; i != 4; ++i) { 4410 if (ShuffleMask[i] < 0) 4411 PFIndexes[i] = 8; 4412 else 4413 PFIndexes[i] = ShuffleMask[i]; 4414 } 4415 4416 // Compute the index in the perfect shuffle table. 4417 unsigned PFTableIndex = 4418 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4419 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4420 unsigned Cost = (PFEntry >> 30); 4421 4422 if (Cost <= 4) 4423 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4424 } 4425 4426 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4427 if (EltSize >= 32) { 4428 // Do the expansion with floating-point types, since that is what the VFP 4429 // registers are defined to use, and since i64 is not legal. 
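    // For example, a v2i64 shuffle is rebuilt below as a v2f64
    // ARMISD::BUILD_VECTOR of extracted elements and then bitcast back to
    // the original integer type.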
4430 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4431 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4432 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4433 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4434 SmallVector<SDValue, 8> Ops; 4435 for (unsigned i = 0; i < NumElts; ++i) { 4436 if (ShuffleMask[i] < 0) 4437 Ops.push_back(DAG.getUNDEF(EltVT)); 4438 else 4439 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4440 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4441 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4442 MVT::i32))); 4443 } 4444 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4445 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4446 } 4447 4448 if (VT == MVT::v8i8) { 4449 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4450 if (NewOp.getNode()) 4451 return NewOp; 4452 } 4453 4454 return SDValue(); 4455} 4456 4457static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4458 // INSERT_VECTOR_ELT is legal only for immediate indexes. 4459 SDValue Lane = Op.getOperand(2); 4460 if (!isa<ConstantSDNode>(Lane)) 4461 return SDValue(); 4462 4463 return Op; 4464} 4465 4466static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4467 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 4468 SDValue Lane = Op.getOperand(1); 4469 if (!isa<ConstantSDNode>(Lane)) 4470 return SDValue(); 4471 4472 SDValue Vec = Op.getOperand(0); 4473 if (Op.getValueType() == MVT::i32 && 4474 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4475 DebugLoc dl = Op.getDebugLoc(); 4476 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4477 } 4478 4479 return Op; 4480} 4481 4482static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4483 // The only time a CONCAT_VECTORS operation can have legal types is when 4484 // two 64-bit vectors are concatenated to a 128-bit vector. 4485 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4486 "unexpected CONCAT_VECTORS"); 4487 DebugLoc dl = Op.getDebugLoc(); 4488 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4489 SDValue Op0 = Op.getOperand(0); 4490 SDValue Op1 = Op.getOperand(1); 4491 if (Op0.getOpcode() != ISD::UNDEF) 4492 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4493 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4494 DAG.getIntPtrConstant(0)); 4495 if (Op1.getOpcode() != ISD::UNDEF) 4496 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4497 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4498 DAG.getIntPtrConstant(1)); 4499 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4500} 4501 4502/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4503/// element has been zero/sign-extended, depending on the isSigned parameter, 4504/// from an integer type half its size. 4505static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4506 bool isSigned) { 4507 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4508 EVT VT = N->getValueType(0); 4509 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4510 SDNode *BVN = N->getOperand(0).getNode(); 4511 if (BVN->getValueType(0) != MVT::v4i32 || 4512 BVN->getOpcode() != ISD::BUILD_VECTOR) 4513 return false; 4514 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 
1 : 0; 4515 unsigned HiElt = 1 - LoElt; 4516 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4517 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4518 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4519 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4520 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4521 return false; 4522 if (isSigned) { 4523 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4524 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4525 return true; 4526 } else { 4527 if (Hi0->isNullValue() && Hi1->isNullValue()) 4528 return true; 4529 } 4530 return false; 4531 } 4532 4533 if (N->getOpcode() != ISD::BUILD_VECTOR) 4534 return false; 4535 4536 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4537 SDNode *Elt = N->getOperand(i).getNode(); 4538 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4539 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4540 unsigned HalfSize = EltSize / 2; 4541 if (isSigned) { 4542 if (!isIntN(HalfSize, C->getSExtValue())) 4543 return false; 4544 } else { 4545 if (!isUIntN(HalfSize, C->getZExtValue())) 4546 return false; 4547 } 4548 continue; 4549 } 4550 return false; 4551 } 4552 4553 return true; 4554} 4555 4556/// isSignExtended - Check if a node is a vector value that is sign-extended 4557/// or a constant BUILD_VECTOR with sign-extended elements. 4558static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4559 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4560 return true; 4561 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4562 return true; 4563 return false; 4564} 4565 4566/// isZeroExtended - Check if a node is a vector value that is zero-extended 4567/// or a constant BUILD_VECTOR with zero-extended elements. 4568static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4569 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4570 return true; 4571 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4572 return true; 4573 return false; 4574} 4575 4576/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4577/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4578static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4579 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4580 return N->getOperand(0); 4581 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4582 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4583 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4584 LD->isNonTemporal(), LD->getAlignment()); 4585 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4586 // have been legalized as a BITCAST from v4i32. 4587 if (N->getOpcode() == ISD::BITCAST) { 4588 SDNode *BVN = N->getOperand(0).getNode(); 4589 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4590 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4591 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4592 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4593 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4594 } 4595 // Construct a new BUILD_VECTOR with elements truncated to half the size. 
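  // For example, a v4i32 BUILD_VECTOR whose constants all fit in 16 bits
  // becomes a v4i16 BUILD_VECTOR of the truncated values.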
4596 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4597 EVT VT = N->getValueType(0); 4598 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4599 unsigned NumElts = VT.getVectorNumElements(); 4600 MVT TruncVT = MVT::getIntegerVT(EltSize); 4601 SmallVector<SDValue, 8> Ops; 4602 for (unsigned i = 0; i != NumElts; ++i) { 4603 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4604 const APInt &CInt = C->getAPIntValue(); 4605 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4606 } 4607 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4608 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4609} 4610 4611static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4612 unsigned Opcode = N->getOpcode(); 4613 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4614 SDNode *N0 = N->getOperand(0).getNode(); 4615 SDNode *N1 = N->getOperand(1).getNode(); 4616 return N0->hasOneUse() && N1->hasOneUse() && 4617 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4618 } 4619 return false; 4620} 4621 4622static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4623 unsigned Opcode = N->getOpcode(); 4624 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4625 SDNode *N0 = N->getOperand(0).getNode(); 4626 SDNode *N1 = N->getOperand(1).getNode(); 4627 return N0->hasOneUse() && N1->hasOneUse() && 4628 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4629 } 4630 return false; 4631} 4632 4633static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4634 // Multiplications are only custom-lowered for 128-bit vectors so that 4635 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 4636 EVT VT = Op.getValueType(); 4637 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4638 SDNode *N0 = Op.getOperand(0).getNode(); 4639 SDNode *N1 = Op.getOperand(1).getNode(); 4640 unsigned NewOpc = 0; 4641 bool isMLA = false; 4642 bool isN0SExt = isSignExtended(N0, DAG); 4643 bool isN1SExt = isSignExtended(N1, DAG); 4644 if (isN0SExt && isN1SExt) 4645 NewOpc = ARMISD::VMULLs; 4646 else { 4647 bool isN0ZExt = isZeroExtended(N0, DAG); 4648 bool isN1ZExt = isZeroExtended(N1, DAG); 4649 if (isN0ZExt && isN1ZExt) 4650 NewOpc = ARMISD::VMULLu; 4651 else if (isN1SExt || isN1ZExt) { 4652 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4653 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4654 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4655 NewOpc = ARMISD::VMULLs; 4656 isMLA = true; 4657 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4658 NewOpc = ARMISD::VMULLu; 4659 isMLA = true; 4660 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4661 std::swap(N0, N1); 4662 NewOpc = ARMISD::VMULLu; 4663 isMLA = true; 4664 } 4665 } 4666 4667 if (!NewOpc) { 4668 if (VT == MVT::v2i64) 4669 // Fall through to expand this. It is not legal. 4670 return SDValue(); 4671 else 4672 // Other vector multiplications are legal. 4673 return Op; 4674 } 4675 } 4676 4677 // Legalize to a VMULL instruction. 4678 DebugLoc DL = Op.getDebugLoc(); 4679 SDValue Op0; 4680 SDValue Op1 = SkipExtension(N1, DAG); 4681 if (!isMLA) { 4682 Op0 = SkipExtension(N0, DAG); 4683 assert(Op0.getValueType().is64BitVector() && 4684 Op1.getValueType().is64BitVector() && 4685 "unexpected types for extended operands to VMULL"); 4686 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4687 } 4688 4689 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4690 // isel lowering to take advantage of no-stall back to back vmul + vmla. 
4691 // vmull q0, d4, d6 4692 // vmlal q0, d5, d6 4693 // is faster than 4694 // vaddl q0, d4, d5 4695 // vmovl q1, d6 4696 // vmul q0, q0, q1 4697 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4698 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4699 EVT Op1VT = Op1.getValueType(); 4700 return DAG.getNode(N0->getOpcode(), DL, VT, 4701 DAG.getNode(NewOpc, DL, VT, 4702 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4703 DAG.getNode(NewOpc, DL, VT, 4704 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4705} 4706 4707static SDValue 4708LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4709 // Convert to float 4710 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4711 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4712 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4713 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4714 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4715 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4716 // Get reciprocal estimate. 4717 // float4 recip = vrecpeq_f32(yf); 4718 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4719 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4720 // Because char has a smaller range than uchar, we can actually get away 4721 // without any newton steps. This requires that we use a weird bias 4722 // of 0xb000, however (again, this has been exhaustively tested). 4723 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4724 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4725 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4726 Y = DAG.getConstant(0xb000, MVT::i32); 4727 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4728 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4729 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4730 // Convert back to short. 4731 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4732 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4733 return X; 4734} 4735 4736static SDValue 4737LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4738 SDValue N2; 4739 // Convert to float. 4740 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4741 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4742 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4743 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4744 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4745 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4746 4747 // Use reciprocal estimate and one refinement step. 4748 // float4 recip = vrecpeq_f32(yf); 4749 // recip *= vrecpsq_f32(yf, recip); 4750 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4751 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4752 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4753 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4754 N1, N2); 4755 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4756 // Because short has a smaller range than ushort, we can actually get away 4757 // with only a single newton step. This requires that we use a weird bias 4758 // of 89, however (again, this has been exhaustively tested). 
4759 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 4760 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4761 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4762 N1 = DAG.getConstant(0x89, MVT::i32); 4763 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4764 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4765 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4766 // Convert back to integer and return. 4767 // return vmovn_s32(vcvt_s32_f32(result)); 4768 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 4769 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 4770 return N0; 4771} 4772 4773static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 4774 EVT VT = Op.getValueType(); 4775 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4776 "unexpected type for custom-lowering ISD::SDIV"); 4777 4778 DebugLoc dl = Op.getDebugLoc(); 4779 SDValue N0 = Op.getOperand(0); 4780 SDValue N1 = Op.getOperand(1); 4781 SDValue N2, N3; 4782 4783 if (VT == MVT::v8i8) { 4784 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 4785 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 4786 4787 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4788 DAG.getIntPtrConstant(4)); 4789 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4790 DAG.getIntPtrConstant(4)); 4791 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4792 DAG.getIntPtrConstant(0)); 4793 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4794 DAG.getIntPtrConstant(0)); 4795 4796 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 4797 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 4798 4799 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4800 N0 = LowerCONCAT_VECTORS(N0, DAG); 4801 4802 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 4803 return N0; 4804 } 4805 return LowerSDIV_v4i16(N0, N1, dl, DAG); 4806} 4807 4808static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 4809 EVT VT = Op.getValueType(); 4810 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 4811 "unexpected type for custom-lowering ISD::UDIV"); 4812 4813 DebugLoc dl = Op.getDebugLoc(); 4814 SDValue N0 = Op.getOperand(0); 4815 SDValue N1 = Op.getOperand(1); 4816 SDValue N2, N3; 4817 4818 if (VT == MVT::v8i8) { 4819 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 4820 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 4821 4822 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4823 DAG.getIntPtrConstant(4)); 4824 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4825 DAG.getIntPtrConstant(4)); 4826 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 4827 DAG.getIntPtrConstant(0)); 4828 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 4829 DAG.getIntPtrConstant(0)); 4830 4831 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 4832 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 4833 4834 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 4835 N0 = LowerCONCAT_VECTORS(N0, DAG); 4836 4837 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 4838 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 4839 N0); 4840 return N0; 4841 } 4842 4843 // v4i16 sdiv ... Convert to float. 
4844   // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4845   // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4846   N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4847   N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4848   N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4849   SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4850
4851   // Use reciprocal estimate and two refinement steps.
4852   // float4 recip = vrecpeq_f32(yf);
4853   // recip *= vrecpsq_f32(yf, recip);
4854   // recip *= vrecpsq_f32(yf, recip);
4855   N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4856                    DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
4857   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4858                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4859                    BN1, N2);
4860   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4861   N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4862                    DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4863                    BN1, N2);
4864   N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4865   // Simply multiplying by the reciprocal estimate can leave us a few ulps
4866   // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4867   // and that it will never cause us to return an answer too large).
4868   // float4 result = as_float4(as_int4(xf*recip) + 2);
4869   N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4870   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4871   N1 = DAG.getConstant(2, MVT::i32);
4872   N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4873   N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4874   N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4875   // Convert back to integer and return.
4876   // return vmovn_u32(vcvt_s32_f32(result));
4877   N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4878   N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4879   return N0;
4880 }
4881
4882 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
4883   EVT VT = Op.getNode()->getValueType(0);
4884   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4885
4886   unsigned Opc;
4887   bool ExtraOp = false;
4888   switch (Op.getOpcode()) {
4889   default: assert(0 && "Invalid code");
4890   case ISD::ADDC: Opc = ARMISD::ADDC; break;
4891   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
4892   case ISD::SUBC: Opc = ARMISD::SUBC; break;
4893   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
4894   }
4895
4896   if (!ExtraOp)
4897     return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4898                        Op.getOperand(1));
4899   return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
4900                      Op.getOperand(1), Op.getOperand(2));
4901 }
4902
4903 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
4904   // Monotonic load/store is legal for all targets
4905   if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
4906     return Op;
4907
4908   // Acquire/Release load/store is not legal for targets without a
4909   // dmb or equivalent available.
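  // Returning a null SDValue tells the caller (and the legalizer) to use the
  // default expansion for these stronger orderings.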
4910   return SDValue();
4911 }
4912
4913
4914 static void
4915 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results,
4916                     SelectionDAG &DAG, unsigned NewOp) {
4917   DebugLoc dl = Node->getDebugLoc();
4918   assert (Node->getValueType(0) == MVT::i64 &&
4919           "Only know how to expand i64 atomics");
4920
4921   SmallVector<SDValue, 6> Ops;
4922   Ops.push_back(Node->getOperand(0)); // Chain
4923   Ops.push_back(Node->getOperand(1)); // Ptr
4924   // Low part of Val1
4925   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4926                             Node->getOperand(2), DAG.getIntPtrConstant(0)));
4927   // High part of Val1
4928   Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4929                             Node->getOperand(2), DAG.getIntPtrConstant(1)));
4930   if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) {
4931     // Low part of Val2
4932     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4933                               Node->getOperand(3), DAG.getIntPtrConstant(0)));
4934     // High part of Val2
4935     Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
4936                               Node->getOperand(3), DAG.getIntPtrConstant(1)));
4937   }
4938   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4939   SDValue Result =
4940     DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64,
4941                             cast<MemSDNode>(Node)->getMemOperand());
4942   SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) };
4943   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
4944   Results.push_back(Result.getValue(2));
4945 }
4946
4947 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4948   switch (Op.getOpcode()) {
4949   default: llvm_unreachable("Don't know how to custom lower this!");
4950   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
4951   case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
4952   case ISD::GlobalAddress:
4953     return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4954 LowerGlobalAddressELF(Op, DAG); 4955 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4956 case ISD::SELECT: return LowerSELECT(Op, DAG); 4957 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4958 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4959 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4960 case ISD::VASTART: return LowerVASTART(Op, DAG); 4961 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4962 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 4963 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4964 case ISD::SINT_TO_FP: 4965 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4966 case ISD::FP_TO_SINT: 4967 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4968 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4969 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4970 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4971 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4972 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4973 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4974 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4975 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4976 Subtarget); 4977 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4978 case ISD::SHL: 4979 case ISD::SRL: 4980 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4981 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4982 case ISD::SRL_PARTS: 4983 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4984 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4985 case ISD::SETCC: return LowerVSETCC(Op, DAG); 4986 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4987 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4988 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 4989 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4990 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4991 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4992 case ISD::MUL: return LowerMUL(Op, DAG); 4993 case ISD::SDIV: return LowerSDIV(Op, DAG); 4994 case ISD::UDIV: return LowerUDIV(Op, DAG); 4995 case ISD::ADDC: 4996 case ISD::ADDE: 4997 case ISD::SUBC: 4998 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 4999 case ISD::ATOMIC_LOAD: 5000 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5001 } 5002 return SDValue(); 5003} 5004 5005/// ReplaceNodeResults - Replace the results of node with an illegal result 5006/// type with new values built out of custom code. 
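/// On ARM this handles BITCAST, 64-bit SRL/SRA, and the i64 atomic
/// operations; the atomics are rewritten below into the corresponding
/// ARMISD::ATOM*64_DAG memory intrinsic nodes.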
5007void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5008 SmallVectorImpl<SDValue>&Results, 5009 SelectionDAG &DAG) const { 5010 SDValue Res; 5011 switch (N->getOpcode()) { 5012 default: 5013 llvm_unreachable("Don't know how to custom expand this!"); 5014 break; 5015 case ISD::BITCAST: 5016 Res = ExpandBITCAST(N, DAG); 5017 break; 5018 case ISD::SRL: 5019 case ISD::SRA: 5020 Res = Expand64BitShift(N, DAG, Subtarget); 5021 break; 5022 case ISD::ATOMIC_LOAD_ADD: 5023 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5024 return; 5025 case ISD::ATOMIC_LOAD_AND: 5026 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5027 return; 5028 case ISD::ATOMIC_LOAD_NAND: 5029 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5030 return; 5031 case ISD::ATOMIC_LOAD_OR: 5032 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5033 return; 5034 case ISD::ATOMIC_LOAD_SUB: 5035 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5036 return; 5037 case ISD::ATOMIC_LOAD_XOR: 5038 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5039 return; 5040 case ISD::ATOMIC_SWAP: 5041 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5042 return; 5043 case ISD::ATOMIC_CMP_SWAP: 5044 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5045 return; 5046 } 5047 if (Res.getNode()) 5048 Results.push_back(Res); 5049} 5050 5051//===----------------------------------------------------------------------===// 5052// ARM Scheduler Hooks 5053//===----------------------------------------------------------------------===// 5054 5055MachineBasicBlock * 5056ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5057 MachineBasicBlock *BB, 5058 unsigned Size) const { 5059 unsigned dest = MI->getOperand(0).getReg(); 5060 unsigned ptr = MI->getOperand(1).getReg(); 5061 unsigned oldval = MI->getOperand(2).getReg(); 5062 unsigned newval = MI->getOperand(3).getReg(); 5063 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5064 DebugLoc dl = MI->getDebugLoc(); 5065 bool isThumb2 = Subtarget->isThumb2(); 5066 5067 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5068 unsigned scratch = 5069 MRI.createVirtualRegister(isThumb2 ? ARM::rGPRRegisterClass 5070 : ARM::GPRRegisterClass); 5071 5072 if (isThumb2) { 5073 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5074 MRI.constrainRegClass(oldval, ARM::rGPRRegisterClass); 5075 MRI.constrainRegClass(newval, ARM::rGPRRegisterClass); 5076 } 5077 5078 unsigned ldrOpc, strOpc; 5079 switch (Size) { 5080 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5081 case 1: 5082 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5083 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5084 break; 5085 case 2: 5086 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5087 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5088 break; 5089 case 4: 5090 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5091 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5092 break; 5093 } 5094 5095 MachineFunction *MF = BB->getParent(); 5096 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5097 MachineFunction::iterator It = BB; 5098 ++It; // insert the new blocks after the current block 5099 5100 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5101 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5102 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5103 MF->insert(It, loop1MBB); 5104 MF->insert(It, loop2MBB); 5105 MF->insert(It, exitMBB); 5106 5107 // Transfer the remainder of BB and its successor edges to exitMBB. 5108 exitMBB->splice(exitMBB->begin(), BB, 5109 llvm::next(MachineBasicBlock::iterator(MI)), 5110 BB->end()); 5111 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5112 5113 // thisMBB: 5114 // ... 5115 // fallthrough --> loop1MBB 5116 BB->addSuccessor(loop1MBB); 5117 5118 // loop1MBB: 5119 // ldrex dest, [ptr] 5120 // cmp dest, oldval 5121 // bne exitMBB 5122 BB = loop1MBB; 5123 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5124 if (ldrOpc == ARM::t2LDREX) 5125 MIB.addImm(0); 5126 AddDefaultPred(MIB); 5127 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5128 .addReg(dest).addReg(oldval)); 5129 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5130 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5131 BB->addSuccessor(loop2MBB); 5132 BB->addSuccessor(exitMBB); 5133 5134 // loop2MBB: 5135 // strex scratch, newval, [ptr] 5136 // cmp scratch, #0 5137 // bne loop1MBB 5138 BB = loop2MBB; 5139 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5140 if (strOpc == ARM::t2STREX) 5141 MIB.addImm(0); 5142 AddDefaultPred(MIB); 5143 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5144 .addReg(scratch).addImm(0)); 5145 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5146 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5147 BB->addSuccessor(loop1MBB); 5148 BB->addSuccessor(exitMBB); 5149 5150 // exitMBB: 5151 // ... 5152 BB = exitMBB; 5153 5154 MI->eraseFromParent(); // The instruction is gone now. 5155 5156 return BB; 5157} 5158 5159MachineBasicBlock * 5160ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5161 unsigned Size, unsigned BinOpcode) const { 5162 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5163 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5164 5165 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5166 MachineFunction *MF = BB->getParent(); 5167 MachineFunction::iterator It = BB; 5168 ++It; 5169 5170 unsigned dest = MI->getOperand(0).getReg(); 5171 unsigned ptr = MI->getOperand(1).getReg(); 5172 unsigned incr = MI->getOperand(2).getReg(); 5173 DebugLoc dl = MI->getDebugLoc(); 5174 bool isThumb2 = Subtarget->isThumb2(); 5175 5176 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5177 if (isThumb2) { 5178 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5179 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5180 } 5181 5182 unsigned ldrOpc, strOpc; 5183 switch (Size) { 5184 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5185 case 1: 5186 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5187 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5188 break; 5189 case 2: 5190 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5191 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5192 break; 5193 case 4: 5194 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5195 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5196 break; 5197 } 5198 5199 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5200 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5201 MF->insert(It, loopMBB); 5202 MF->insert(It, exitMBB); 5203 5204 // Transfer the remainder of BB and its successor edges to exitMBB. 5205 exitMBB->splice(exitMBB->begin(), BB, 5206 llvm::next(MachineBasicBlock::iterator(MI)), 5207 BB->end()); 5208 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5209 5210 TargetRegisterClass *TRC = 5211 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5212 unsigned scratch = MRI.createVirtualRegister(TRC); 5213 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5214 5215 // thisMBB: 5216 // ... 5217 // fallthrough --> loopMBB 5218 BB->addSuccessor(loopMBB); 5219 5220 // loopMBB: 5221 // ldrex dest, ptr 5222 // <binop> scratch2, dest, incr 5223 // strex scratch, scratch2, ptr 5224 // cmp scratch, #0 5225 // bne- loopMBB 5226 // fallthrough --> exitMBB 5227 BB = loopMBB; 5228 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5229 if (ldrOpc == ARM::t2LDREX) 5230 MIB.addImm(0); 5231 AddDefaultPred(MIB); 5232 if (BinOpcode) { 5233 // operand order needs to go the other way for NAND 5234 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 5235 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5236 addReg(incr).addReg(dest)).addReg(0); 5237 else 5238 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 5239 addReg(dest).addReg(incr)).addReg(0); 5240 } 5241 5242 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5243 if (strOpc == ARM::t2STREX) 5244 MIB.addImm(0); 5245 AddDefaultPred(MIB); 5246 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5247 .addReg(scratch).addImm(0)); 5248 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5249 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5250 5251 BB->addSuccessor(loopMBB); 5252 BB->addSuccessor(exitMBB); 5253 5254 // exitMBB: 5255 // ... 5256 BB = exitMBB; 5257 5258 MI->eraseFromParent(); // The instruction is gone now. 5259 5260 return BB; 5261} 5262 5263MachineBasicBlock * 5264ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 5265 MachineBasicBlock *BB, 5266 unsigned Size, 5267 bool signExtend, 5268 ARMCC::CondCodes Cond) const { 5269 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5270 5271 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5272 MachineFunction *MF = BB->getParent(); 5273 MachineFunction::iterator It = BB; 5274 ++It; 5275 5276 unsigned dest = MI->getOperand(0).getReg(); 5277 unsigned ptr = MI->getOperand(1).getReg(); 5278 unsigned incr = MI->getOperand(2).getReg(); 5279 unsigned oldval = dest; 5280 DebugLoc dl = MI->getDebugLoc(); 5281 bool isThumb2 = Subtarget->isThumb2(); 5282 5283 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5284 if (isThumb2) { 5285 MRI.constrainRegClass(dest, ARM::rGPRRegisterClass); 5286 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5287 } 5288 5289 unsigned ldrOpc, strOpc, extendOpc; 5290 switch (Size) { 5291 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5292 case 1: 5293 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5294 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5295 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 5296 break; 5297 case 2: 5298 ldrOpc = isThumb2 ? 
ARM::t2LDREXH : ARM::LDREXH; 5299 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5300 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 5301 break; 5302 case 4: 5303 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5304 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5305 extendOpc = 0; 5306 break; 5307 } 5308 5309 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5310 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5311 MF->insert(It, loopMBB); 5312 MF->insert(It, exitMBB); 5313 5314 // Transfer the remainder of BB and its successor edges to exitMBB. 5315 exitMBB->splice(exitMBB->begin(), BB, 5316 llvm::next(MachineBasicBlock::iterator(MI)), 5317 BB->end()); 5318 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5319 5320 TargetRegisterClass *TRC = 5321 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5322 unsigned scratch = MRI.createVirtualRegister(TRC); 5323 unsigned scratch2 = MRI.createVirtualRegister(TRC); 5324 5325 // thisMBB: 5326 // ... 5327 // fallthrough --> loopMBB 5328 BB->addSuccessor(loopMBB); 5329 5330 // loopMBB: 5331 // ldrex dest, ptr 5332 // (sign extend dest, if required) 5333 // cmp dest, incr 5334 // cmov.cond scratch2, dest, incr 5335 // strex scratch, scratch2, ptr 5336 // cmp scratch, #0 5337 // bne- loopMBB 5338 // fallthrough --> exitMBB 5339 BB = loopMBB; 5340 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5341 if (ldrOpc == ARM::t2LDREX) 5342 MIB.addImm(0); 5343 AddDefaultPred(MIB); 5344 5345 // Sign extend the value, if necessary. 5346 if (signExtend && extendOpc) { 5347 oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass); 5348 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 5349 .addReg(dest) 5350 .addImm(0)); 5351 } 5352 5353 // Build compare and cmov instructions. 5354 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5355 .addReg(oldval).addReg(incr)); 5356 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 5357 .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); 5358 5359 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 5360 if (strOpc == ARM::t2STREX) 5361 MIB.addImm(0); 5362 AddDefaultPred(MIB); 5363 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5364 .addReg(scratch).addImm(0)); 5365 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5366 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5367 5368 BB->addSuccessor(loopMBB); 5369 BB->addSuccessor(exitMBB); 5370 5371 // exitMBB: 5372 // ... 5373 BB = exitMBB; 5374 5375 MI->eraseFromParent(); // The instruction is gone now. 5376 5377 return BB; 5378} 5379 5380MachineBasicBlock * 5381ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 5382 unsigned Op1, unsigned Op2, 5383 bool NeedsCarry, bool IsCmpxchg) const { 5384 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
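  // Op1 and Op2 are the opcodes applied to the low and high words of the
  // value, e.g. ADDrr/ADCrr (or their Thumb2 forms) for a 64-bit atomic add;
  // NeedsCarry makes the low-word op define CPSR so the high-word op can
  // consume the carry.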
5385 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5386 5387 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5388 MachineFunction *MF = BB->getParent(); 5389 MachineFunction::iterator It = BB; 5390 ++It; 5391 5392 unsigned destlo = MI->getOperand(0).getReg(); 5393 unsigned desthi = MI->getOperand(1).getReg(); 5394 unsigned ptr = MI->getOperand(2).getReg(); 5395 unsigned vallo = MI->getOperand(3).getReg(); 5396 unsigned valhi = MI->getOperand(4).getReg(); 5397 DebugLoc dl = MI->getDebugLoc(); 5398 bool isThumb2 = Subtarget->isThumb2(); 5399 5400 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5401 if (isThumb2) { 5402 MRI.constrainRegClass(destlo, ARM::rGPRRegisterClass); 5403 MRI.constrainRegClass(desthi, ARM::rGPRRegisterClass); 5404 MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass); 5405 } 5406 5407 unsigned ldrOpc = isThumb2 ? ARM::t2LDREXD : ARM::LDREXD; 5408 unsigned strOpc = isThumb2 ? ARM::t2STREXD : ARM::STREXD; 5409 5410 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5411 MachineBasicBlock *contBB = 0, *cont2BB = 0; 5412 if (IsCmpxchg) { 5413 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 5414 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 5415 } 5416 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5417 MF->insert(It, loopMBB); 5418 if (IsCmpxchg) { 5419 MF->insert(It, contBB); 5420 MF->insert(It, cont2BB); 5421 } 5422 MF->insert(It, exitMBB); 5423 5424 // Transfer the remainder of BB and its successor edges to exitMBB. 5425 exitMBB->splice(exitMBB->begin(), BB, 5426 llvm::next(MachineBasicBlock::iterator(MI)), 5427 BB->end()); 5428 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5429 5430 TargetRegisterClass *TRC = 5431 isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5432 unsigned storesuccess = MRI.createVirtualRegister(TRC); 5433 5434 // thisMBB: 5435 // ... 5436 // fallthrough --> loopMBB 5437 BB->addSuccessor(loopMBB); 5438 5439 // loopMBB: 5440 // ldrexd r2, r3, ptr 5441 // <binopa> r0, r2, incr 5442 // <binopb> r1, r3, incr 5443 // strexd storesuccess, r0, r1, ptr 5444 // cmp storesuccess, #0 5445 // bne- loopMBB 5446 // fallthrough --> exitMBB 5447 // 5448 // Note that the registers are explicitly specified because there is not any 5449 // way to force the register allocator to allocate a register pair. 5450 // 5451 // FIXME: The hardcoded registers are not necessary for Thumb2, but we 5452 // need to properly enforce the restriction that the two output registers 5453 // for ldrexd must be different. 5454 BB = loopMBB; 5455 // Load 5456 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 5457 .addReg(ARM::R2, RegState::Define) 5458 .addReg(ARM::R3, RegState::Define).addReg(ptr)); 5459 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 5460 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo).addReg(ARM::R2); 5461 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi).addReg(ARM::R3); 5462 5463 if (IsCmpxchg) { 5464 // Add early exit 5465 for (unsigned i = 0; i < 2; i++) { 5466 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 5467 ARM::CMPrr)) 5468 .addReg(i == 0 ? destlo : desthi) 5469 .addReg(i == 0 ? vallo : valhi)); 5470 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5471 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5472 BB->addSuccessor(exitMBB); 5473 BB->addSuccessor(i == 0 ? contBB : cont2BB); 5474 BB = (i == 0 ? 
contBB : cont2BB); 5475 } 5476 5477 // Copy to physregs for strexd 5478 unsigned setlo = MI->getOperand(5).getReg(); 5479 unsigned sethi = MI->getOperand(6).getReg(); 5480 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(setlo); 5481 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(sethi); 5482 } else if (Op1) { 5483 // Perform binary operation 5484 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), ARM::R0) 5485 .addReg(destlo).addReg(vallo)) 5486 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 5487 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), ARM::R1) 5488 .addReg(desthi).addReg(valhi)).addReg(0); 5489 } else { 5490 // Copy to physregs for strexd 5491 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R0).addReg(vallo); 5492 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), ARM::R1).addReg(valhi); 5493 } 5494 5495 // Store 5496 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 5497 .addReg(ARM::R0).addReg(ARM::R1).addReg(ptr)); 5498 // Cmp+jump 5499 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5500 .addReg(storesuccess).addImm(0)); 5501 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5502 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5503 5504 BB->addSuccessor(loopMBB); 5505 BB->addSuccessor(exitMBB); 5506 5507 // exitMBB: 5508 // ... 5509 BB = exitMBB; 5510 5511 MI->eraseFromParent(); // The instruction is gone now. 5512 5513 return BB; 5514} 5515 5516/// EmitBasePointerRecalculation - For functions using a base pointer, we 5517/// rematerialize it (via the frame pointer). 5518void ARMTargetLowering:: 5519EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB, 5520 MachineBasicBlock *DispatchBB) const { 5521 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5522 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 5523 MachineFunction &MF = *MI->getParent()->getParent(); 5524 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 5525 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 5526 5527 if (!RI.hasBasePointer(MF)) return; 5528 5529 MachineBasicBlock::iterator MBBI = MI; 5530 5531 int32_t NumBytes = AFI->getFramePtrSpillOffset(); 5532 unsigned FramePtr = RI.getFrameRegister(MF); 5533 assert(MF.getTarget().getFrameLowering()->hasFP(MF) && 5534 "Base pointer without frame pointer?"); 5535 5536 if (AFI->isThumb2Function()) 5537 llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5538 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5539 else if (AFI->isThumbFunction()) 5540 llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5541 FramePtr, -NumBytes, *AII, RI); 5542 else 5543 llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6, 5544 FramePtr, -NumBytes, ARMCC::AL, 0, *AII); 5545 5546 if (!RI.needsStackRealignment(MF)) return; 5547 5548 // If there's dynamic realignment, adjust for it. 5549 MachineFrameInfo *MFI = MF.getFrameInfo(); 5550 unsigned MaxAlign = MFI->getMaxAlignment(); 5551 assert(!AFI->isThumb1OnlyFunction()); 5552 5553 // Emit bic r6, r6, MaxAlign 5554 unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri; 5555 AddDefaultCC( 5556 AddDefaultPred( 5557 BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6) 5558 .addReg(ARM::R6, RegState::Kill) 5559 .addImm(MaxAlign - 1))); 5560} 5561 5562/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 5563/// registers the function context. 
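/// The PC-relative address of the dispatch block is computed here and stored
/// into the jump-buffer slot of the function context (&jbuf[1], at offset 36).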
5564void ARMTargetLowering:: 5565SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 5566 MachineBasicBlock *DispatchBB, int FI) const { 5567 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5568 DebugLoc dl = MI->getDebugLoc(); 5569 MachineFunction *MF = MBB->getParent(); 5570 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5571 MachineConstantPool *MCP = MF->getConstantPool(); 5572 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5573 const Function *F = MF->getFunction(); 5574 5575 bool isThumb = Subtarget->isThumb(); 5576 bool isThumb2 = Subtarget->isThumb2(); 5577 5578 unsigned PCLabelId = AFI->createPICLabelUId(); 5579 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 5580 ARMConstantPoolValue *CPV = 5581 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 5582 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 5583 5584 const TargetRegisterClass *TRC = 5585 isThumb ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5586 5587 // Grab constant pool and fixed stack memory operands. 5588 MachineMemOperand *CPMMO = 5589 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 5590 MachineMemOperand::MOLoad, 4, 4); 5591 5592 MachineMemOperand *FIMMOSt = 5593 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5594 MachineMemOperand::MOStore, 4, 4); 5595 5596 EmitBasePointerRecalculation(MI, MBB, DispatchBB); 5597 5598 // Load the address of the dispatch MBB into the jump buffer. 5599 if (isThumb2) { 5600 // Incoming value: jbuf 5601 // ldr.n r5, LCPI1_1 5602 // orr r5, r5, #1 5603 // add r5, pc 5604 // str r5, [$jbuf, #+4] ; &jbuf[1] 5605 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5606 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 5607 .addConstantPoolIndex(CPI) 5608 .addMemOperand(CPMMO)); 5609 // Set the low bit because of thumb mode. 5610 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5611 AddDefaultCC( 5612 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 5613 .addReg(NewVReg1, RegState::Kill) 5614 .addImm(0x01))); 5615 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5616 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 5617 .addReg(NewVReg2, RegState::Kill) 5618 .addImm(PCLabelId); 5619 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 5620 .addReg(NewVReg3, RegState::Kill) 5621 .addFrameIndex(FI) 5622 .addImm(36) // &jbuf[1] :: pc 5623 .addMemOperand(FIMMOSt)); 5624 } else if (isThumb) { 5625 // Incoming value: jbuf 5626 // ldr.n r1, LCPI1_4 5627 // add r1, pc 5628 // mov r2, #1 5629 // orrs r1, r2 5630 // add r2, $jbuf, #+4 ; &jbuf[1] 5631 // str r1, [r2] 5632 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5633 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 5634 .addConstantPoolIndex(CPI) 5635 .addMemOperand(CPMMO)); 5636 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5637 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 5638 .addReg(NewVReg1, RegState::Kill) 5639 .addImm(PCLabelId); 5640 // Set the low bit because of thumb mode. 
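    // Thumb1 has no ORR-with-immediate encoding, so the constant 1 is first
    // materialized in a register and then ORed in to set the low bit.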
5641 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5642 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 5643 .addReg(ARM::CPSR, RegState::Define) 5644 .addImm(1)); 5645 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5646 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 5647 .addReg(ARM::CPSR, RegState::Define) 5648 .addReg(NewVReg2, RegState::Kill) 5649 .addReg(NewVReg3, RegState::Kill)); 5650 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5651 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 5652 .addFrameIndex(FI) 5653 .addImm(36)); // &jbuf[1] :: pc 5654 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 5655 .addReg(NewVReg4, RegState::Kill) 5656 .addReg(NewVReg5, RegState::Kill) 5657 .addImm(0) 5658 .addMemOperand(FIMMOSt)); 5659 } else { 5660 // Incoming value: jbuf 5661 // ldr r1, LCPI1_1 5662 // add r1, pc, r1 5663 // str r1, [$jbuf, #+4] ; &jbuf[1] 5664 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5665 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 5666 .addConstantPoolIndex(CPI) 5667 .addImm(0) 5668 .addMemOperand(CPMMO)); 5669 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5670 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 5671 .addReg(NewVReg1, RegState::Kill) 5672 .addImm(PCLabelId)); 5673 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 5674 .addReg(NewVReg2, RegState::Kill) 5675 .addFrameIndex(FI) 5676 .addImm(36) // &jbuf[1] :: pc 5677 .addMemOperand(FIMMOSt)); 5678 } 5679} 5680 5681MachineBasicBlock *ARMTargetLowering:: 5682EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 5683 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5684 DebugLoc dl = MI->getDebugLoc(); 5685 MachineFunction *MF = MBB->getParent(); 5686 MachineRegisterInfo *MRI = &MF->getRegInfo(); 5687 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 5688 MachineFrameInfo *MFI = MF->getFrameInfo(); 5689 int FI = MFI->getFunctionContextIndex(); 5690 5691 const TargetRegisterClass *TRC = 5692 Subtarget->isThumb() ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass; 5693 5694 // Get a mapping of the call site numbers to all of the landing pads they're 5695 // associated with. 5696 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 5697 unsigned MaxCSNum = 0; 5698 MachineModuleInfo &MMI = MF->getMMI(); 5699 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) { 5700 if (!BB->isLandingPad()) continue; 5701 5702 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 5703 // pad. 5704 for (MachineBasicBlock::iterator 5705 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 5706 if (!II->isEHLabel()) continue; 5707 5708 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 5709 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 5710 5711 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 5712 for (SmallVectorImpl<unsigned>::iterator 5713 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 5714 CSI != CSE; ++CSI) { 5715 CallSiteNumToLPad[*CSI].push_back(BB); 5716 MaxCSNum = std::max(MaxCSNum, *CSI); 5717 } 5718 break; 5719 } 5720 } 5721 5722 // Get an ordered list of the machine basic blocks for the jump table. 
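  // Also collect every predecessor of a landing pad (the invoke blocks);
  // their successor edges are rewired to the new dispatch block further down.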
5723 std::vector<MachineBasicBlock*> LPadList; 5724 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 5725 LPadList.reserve(CallSiteNumToLPad.size()); 5726 for (unsigned I = 1; I <= MaxCSNum; ++I) { 5727 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 5728 for (SmallVectorImpl<MachineBasicBlock*>::iterator 5729 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 5730 LPadList.push_back(*II); 5731 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 5732 } 5733 } 5734 5735 assert(!LPadList.empty() && 5736 "No landing pad destinations for the dispatch jump table!"); 5737 5738 // Create the jump table and associated information. 5739 MachineJumpTableInfo *JTI = 5740 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 5741 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 5742 unsigned UId = AFI->createJumpTableUId(); 5743 5744 // Create the MBBs for the dispatch code. 5745 5746 // Shove the dispatch's address into the return slot in the function context. 5747 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 5748 DispatchBB->setIsLandingPad(); 5749 5750 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 5751 BuildMI(TrapBB, dl, TII->get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP)); 5752 DispatchBB->addSuccessor(TrapBB); 5753 5754 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 5755 DispatchBB->addSuccessor(DispContBB); 5756 5757 // Insert and MBBs. 5758 MF->insert(MF->end(), DispatchBB); 5759 MF->insert(MF->end(), DispContBB); 5760 MF->insert(MF->end(), TrapBB); 5761 5762 // Insert code into the entry block that creates and registers the function 5763 // context. 5764 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 5765 5766 MachineMemOperand *FIMMOLd = 5767 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 5768 MachineMemOperand::MOLoad | 5769 MachineMemOperand::MOVolatile, 4, 4); 5770 5771 unsigned NumLPads = LPadList.size(); 5772 if (Subtarget->isThumb2()) { 5773 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5774 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 5775 .addFrameIndex(FI) 5776 .addImm(4) 5777 .addMemOperand(FIMMOLd)); 5778 5779 if (NumLPads < 256) { 5780 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 5781 .addReg(NewVReg1) 5782 .addImm(LPadList.size())); 5783 } else { 5784 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5785 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 5786 .addImm(NumLPads & 0xFFFF)); 5787 5788 unsigned VReg2 = VReg1; 5789 if ((NumLPads & 0xFFFF0000) != 0) { 5790 VReg2 = MRI->createVirtualRegister(TRC); 5791 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 5792 .addReg(VReg1) 5793 .addImm(NumLPads >> 16)); 5794 } 5795 5796 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 5797 .addReg(NewVReg1) 5798 .addReg(VReg2)); 5799 } 5800 5801 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 5802 .addMBB(TrapBB) 5803 .addImm(ARMCC::HI) 5804 .addReg(ARM::CPSR); 5805 5806 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5807 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 5808 .addJumpTableIndex(MJTI) 5809 .addImm(UId)); 5810 5811 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5812 AddDefaultCC( 5813 AddDefaultPred( 5814 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 5815 .addReg(NewVReg3, RegState::Kill) 5816 .addReg(NewVReg1) 5817 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5818 5819 BuildMI(DispContBB, dl, 
TII->get(ARM::t2BR_JT)) 5820 .addReg(NewVReg4, RegState::Kill) 5821 .addReg(NewVReg1) 5822 .addJumpTableIndex(MJTI) 5823 .addImm(UId); 5824 } else if (Subtarget->isThumb()) { 5825 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5826 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 5827 .addFrameIndex(FI) 5828 .addImm(1) 5829 .addMemOperand(FIMMOLd)); 5830 5831 if (NumLPads < 256) { 5832 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 5833 .addReg(NewVReg1) 5834 .addImm(NumLPads)); 5835 } else { 5836 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5837 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5838 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5839 5840 // MachineConstantPool wants an explicit alignment. 5841 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5842 if (Align == 0) 5843 Align = getTargetData()->getTypeAllocSize(C->getType()); 5844 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5845 5846 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5847 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 5848 .addReg(VReg1, RegState::Define) 5849 .addConstantPoolIndex(Idx)); 5850 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 5851 .addReg(NewVReg1) 5852 .addReg(VReg1)); 5853 } 5854 5855 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 5856 .addMBB(TrapBB) 5857 .addImm(ARMCC::HI) 5858 .addReg(ARM::CPSR); 5859 5860 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 5861 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 5862 .addReg(ARM::CPSR, RegState::Define) 5863 .addReg(NewVReg1) 5864 .addImm(2)); 5865 5866 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5867 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 5868 .addJumpTableIndex(MJTI) 5869 .addImm(UId)); 5870 5871 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5872 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 5873 .addReg(ARM::CPSR, RegState::Define) 5874 .addReg(NewVReg2, RegState::Kill) 5875 .addReg(NewVReg3)); 5876 5877 MachineMemOperand *JTMMOLd = 5878 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5879 MachineMemOperand::MOLoad, 4, 4); 5880 5881 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5882 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 5883 .addReg(NewVReg4, RegState::Kill) 5884 .addImm(0) 5885 .addMemOperand(JTMMOLd)); 5886 5887 unsigned NewVReg6 = MRI->createVirtualRegister(TRC); 5888 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 5889 .addReg(ARM::CPSR, RegState::Define) 5890 .addReg(NewVReg5, RegState::Kill) 5891 .addReg(NewVReg3)); 5892 5893 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 5894 .addReg(NewVReg6, RegState::Kill) 5895 .addJumpTableIndex(MJTI) 5896 .addImm(UId); 5897 } else { 5898 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 5899 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 5900 .addFrameIndex(FI) 5901 .addImm(4) 5902 .addMemOperand(FIMMOLd)); 5903 5904 if (NumLPads < 256) { 5905 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 5906 .addReg(NewVReg1) 5907 .addImm(NumLPads)); 5908 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 5909 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5910 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 5911 .addImm(NumLPads & 0xFFFF)); 5912 5913 unsigned VReg2 = VReg1; 5914 if ((NumLPads & 0xFFFF0000) 
!= 0) { 5915 VReg2 = MRI->createVirtualRegister(TRC); 5916 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 5917 .addReg(VReg1) 5918 .addImm(NumLPads >> 16)); 5919 } 5920 5921 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5922 .addReg(NewVReg1) 5923 .addReg(VReg2)); 5924 } else { 5925 MachineConstantPool *ConstantPool = MF->getConstantPool(); 5926 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 5927 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 5928 5929 // MachineConstantPool wants an explicit alignment. 5930 unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); 5931 if (Align == 0) 5932 Align = getTargetData()->getTypeAllocSize(C->getType()); 5933 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 5934 5935 unsigned VReg1 = MRI->createVirtualRegister(TRC); 5936 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 5937 .addReg(VReg1, RegState::Define) 5938 .addConstantPoolIndex(Idx) 5939 .addImm(0)); 5940 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 5941 .addReg(NewVReg1) 5942 .addReg(VReg1, RegState::Kill)); 5943 } 5944 5945 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 5946 .addMBB(TrapBB) 5947 .addImm(ARMCC::HI) 5948 .addReg(ARM::CPSR); 5949 5950 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 5951 AddDefaultCC( 5952 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 5953 .addReg(NewVReg1) 5954 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 5955 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 5956 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 5957 .addJumpTableIndex(MJTI) 5958 .addImm(UId)); 5959 5960 MachineMemOperand *JTMMOLd = 5961 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 5962 MachineMemOperand::MOLoad, 4, 4); 5963 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 5964 AddDefaultPred( 5965 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 5966 .addReg(NewVReg3, RegState::Kill) 5967 .addReg(NewVReg4) 5968 .addImm(0) 5969 .addMemOperand(JTMMOLd)); 5970 5971 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 5972 .addReg(NewVReg5, RegState::Kill) 5973 .addReg(NewVReg4) 5974 .addJumpTableIndex(MJTI) 5975 .addImm(UId); 5976 } 5977 5978 // Add the jump table entries as successors to the MBB. 5979 MachineBasicBlock *PrevMBB = 0; 5980 for (std::vector<MachineBasicBlock*>::iterator 5981 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 5982 MachineBasicBlock *CurMBB = *I; 5983 if (PrevMBB != CurMBB) 5984 DispContBB->addSuccessor(CurMBB); 5985 PrevMBB = CurMBB; 5986 } 5987 5988 // N.B. the order the invoke BBs are processed in doesn't matter here. 5989 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 5990 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 5991 const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF); 5992 SmallVector<MachineBasicBlock*, 64> MBBLPads; 5993 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 5994 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 5995 MachineBasicBlock *BB = *I; 5996 5997 // Remove the landing pad successor from the invoke block and replace it 5998 // with the new dispatch block. 
5999 for (MachineBasicBlock::succ_iterator 6000 SI = BB->succ_begin(), SE = BB->succ_end(); SI != SE; ++SI) { 6001 MachineBasicBlock *SMBB = *SI; 6002 if (SMBB->isLandingPad()) { 6003 BB->removeSuccessor(SMBB); 6004 MBBLPads.push_back(SMBB); 6005 } 6006 } 6007 6008 BB->addSuccessor(DispatchBB); 6009 6010 // Find the invoke call and mark all of the callee-saved registers as 6011 // 'implicit defined' so that they're spilled. This prevents code from 6012 // moving instructions to before the EH block, where they will never be 6013 // executed. 6014 for (MachineBasicBlock::reverse_iterator 6015 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6016 if (!II->getDesc().isCall()) continue; 6017 6018 DenseMap<unsigned, bool> DefRegs; 6019 for (MachineInstr::mop_iterator 6020 OI = II->operands_begin(), OE = II->operands_end(); 6021 OI != OE; ++OI) { 6022 if (!OI->isReg()) continue; 6023 DefRegs[OI->getReg()] = true; 6024 } 6025 6026 MachineInstrBuilder MIB(&*II); 6027 6028 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6029 unsigned Reg = SavedRegs[i]; 6030 if (Subtarget->isThumb2() && 6031 !ARM::tGPRRegisterClass->contains(Reg) && 6032 !ARM::hGPRRegisterClass->contains(Reg)) 6033 continue; 6034 else if (Subtarget->isThumb1Only() && 6035 !ARM::tGPRRegisterClass->contains(Reg)) 6036 continue; 6037 else if (!Subtarget->isThumb() && 6038 !ARM::GPRRegisterClass->contains(Reg)) 6039 continue; 6040 if (!DefRegs[Reg]) 6041 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6042 } 6043 6044 break; 6045 } 6046 } 6047 6048 // Mark all former landing pads as non-landing pads. The dispatch is the only 6049 // landing pad now. 6050 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6051 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6052 (*I)->setIsLandingPad(false); 6053 6054 // The instruction is gone now. 6055 MI->eraseFromParent(); 6056 6057 return MBB; 6058} 6059 6060static 6061MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6062 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6063 E = MBB->succ_end(); I != E; ++I) 6064 if (*I != Succ) 6065 return *I; 6066 llvm_unreachable("Expecting a BB with two successors!"); 6067} 6068 6069MachineBasicBlock * 6070ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6071 MachineBasicBlock *BB) const { 6072 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6073 DebugLoc dl = MI->getDebugLoc(); 6074 bool isThumb2 = Subtarget->isThumb2(); 6075 switch (MI->getOpcode()) { 6076 default: { 6077 MI->dump(); 6078 llvm_unreachable("Unexpected instr type to insert"); 6079 } 6080 // The Thumb2 pre-indexed stores have the same MI operands, they just 6081 // define them differently in the .td files from the isel patterns, so 6082 // they need pseudos. 6083 case ARM::t2STR_preidx: 6084 MI->setDesc(TII->get(ARM::t2STR_PRE)); 6085 return BB; 6086 case ARM::t2STRB_preidx: 6087 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 6088 return BB; 6089 case ARM::t2STRH_preidx: 6090 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 6091 return BB; 6092 6093 case ARM::STRi_preidx: 6094 case ARM::STRBi_preidx: { 6095 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 6096 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 6097 // Decode the offset. 
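  // The addressing-mode-2 immediate packs the add/sub flag together with the
  // offset magnitude; pull the magnitude out and negate it for the subtracting
  // form so the *_PRE_IMM instruction receives a plain signed offset.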
6098 unsigned Offset = MI->getOperand(4).getImm(); 6099 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 6100 Offset = ARM_AM::getAM2Offset(Offset); 6101 if (isSub) 6102 Offset = -Offset; 6103 6104 MachineMemOperand *MMO = *MI->memoperands_begin(); 6105 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 6106 .addOperand(MI->getOperand(0)) // Rn_wb 6107 .addOperand(MI->getOperand(1)) // Rt 6108 .addOperand(MI->getOperand(2)) // Rn 6109 .addImm(Offset) // offset (skip GPR==zero_reg) 6110 .addOperand(MI->getOperand(5)) // pred 6111 .addOperand(MI->getOperand(6)) 6112 .addMemOperand(MMO); 6113 MI->eraseFromParent(); 6114 return BB; 6115 } 6116 case ARM::STRr_preidx: 6117 case ARM::STRBr_preidx: 6118 case ARM::STRH_preidx: { 6119 unsigned NewOpc; 6120 switch (MI->getOpcode()) { 6121 default: llvm_unreachable("unexpected opcode!"); 6122 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 6123 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 6124 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 6125 } 6126 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 6127 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 6128 MIB.addOperand(MI->getOperand(i)); 6129 MI->eraseFromParent(); 6130 return BB; 6131 } 6132 case ARM::ATOMIC_LOAD_ADD_I8: 6133 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6134 case ARM::ATOMIC_LOAD_ADD_I16: 6135 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6136 case ARM::ATOMIC_LOAD_ADD_I32: 6137 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 6138 6139 case ARM::ATOMIC_LOAD_AND_I8: 6140 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6141 case ARM::ATOMIC_LOAD_AND_I16: 6142 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6143 case ARM::ATOMIC_LOAD_AND_I32: 6144 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6145 6146 case ARM::ATOMIC_LOAD_OR_I8: 6147 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6148 case ARM::ATOMIC_LOAD_OR_I16: 6149 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6150 case ARM::ATOMIC_LOAD_OR_I32: 6151 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6152 6153 case ARM::ATOMIC_LOAD_XOR_I8: 6154 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6155 case ARM::ATOMIC_LOAD_XOR_I16: 6156 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6157 case ARM::ATOMIC_LOAD_XOR_I32: 6158 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6159 6160 case ARM::ATOMIC_LOAD_NAND_I8: 6161 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6162 case ARM::ATOMIC_LOAD_NAND_I16: 6163 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6164 case ARM::ATOMIC_LOAD_NAND_I32: 6165 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 6166 6167 case ARM::ATOMIC_LOAD_SUB_I8: 6168 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6169 case ARM::ATOMIC_LOAD_SUB_I16: 6170 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 6171 case ARM::ATOMIC_LOAD_SUB_I32: 6172 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 6173 6174 case ARM::ATOMIC_LOAD_MIN_I8: 6175 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 6176 case ARM::ATOMIC_LOAD_MIN_I16: 6177 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 6178 case ARM::ATOMIC_LOAD_MIN_I32: 6179 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 6180 6181 case ARM::ATOMIC_LOAD_MAX_I8: 6182 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 6183 case ARM::ATOMIC_LOAD_MAX_I16: 6184 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 6185 case ARM::ATOMIC_LOAD_MAX_I32: 6186 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 6187 6188 case ARM::ATOMIC_LOAD_UMIN_I8: 6189 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 6190 case ARM::ATOMIC_LOAD_UMIN_I16: 6191 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 6192 case ARM::ATOMIC_LOAD_UMIN_I32: 6193 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 6194 6195 case ARM::ATOMIC_LOAD_UMAX_I8: 6196 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 6197 case ARM::ATOMIC_LOAD_UMAX_I16: 6198 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 6199 case ARM::ATOMIC_LOAD_UMAX_I32: 6200 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 6201 6202 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 6203 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 6204 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 6205 6206 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 6207 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 6208 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 6209 6210 6211 case ARM::ATOMADD6432: 6212 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 6213 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 6214 /*NeedsCarry*/ true); 6215 case ARM::ATOMSUB6432: 6216 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6217 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6218 /*NeedsCarry*/ true); 6219 case ARM::ATOMOR6432: 6220 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 6221 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 6222 case ARM::ATOMXOR6432: 6223 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 6224 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 6225 case ARM::ATOMAND6432: 6226 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 6227 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 6228 case ARM::ATOMSWAP6432: 6229 return EmitAtomicBinary64(MI, BB, 0, 0, false); 6230 case ARM::ATOMCMPXCHG6432: 6231 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 6232 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 6233 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 6234 6235 case ARM::tMOVCCr_pseudo: { 6236 // To "insert" a SELECT_CC instruction, we actually have to insert the 6237 // diamond control-flow pattern. The incoming instruction knows the 6238 // destination vreg to set, the condition code register to branch on, the 6239 // true/false values to select between, and a branch opcode to use. 6240 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6241 MachineFunction::iterator It = BB; 6242 ++It; 6243 6244 // thisMBB: 6245 // ... 6246 // TrueVal = ... 
6247 // cmpTY ccX, r1, r2 6248 // bCC copy1MBB 6249 // fallthrough --> copy0MBB 6250 MachineBasicBlock *thisMBB = BB; 6251 MachineFunction *F = BB->getParent(); 6252 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6253 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6254 F->insert(It, copy0MBB); 6255 F->insert(It, sinkMBB); 6256 6257 // Transfer the remainder of BB and its successor edges to sinkMBB. 6258 sinkMBB->splice(sinkMBB->begin(), BB, 6259 llvm::next(MachineBasicBlock::iterator(MI)), 6260 BB->end()); 6261 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6262 6263 BB->addSuccessor(copy0MBB); 6264 BB->addSuccessor(sinkMBB); 6265 6266 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 6267 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 6268 6269 // copy0MBB: 6270 // %FalseValue = ... 6271 // # fallthrough to sinkMBB 6272 BB = copy0MBB; 6273 6274 // Update machine-CFG edges 6275 BB->addSuccessor(sinkMBB); 6276 6277 // sinkMBB: 6278 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6279 // ... 6280 BB = sinkMBB; 6281 BuildMI(*BB, BB->begin(), dl, 6282 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 6283 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 6284 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6285 6286 MI->eraseFromParent(); // The pseudo instruction is gone now. 6287 return BB; 6288 } 6289 6290 case ARM::BCCi64: 6291 case ARM::BCCZi64: { 6292 // If there is an unconditional branch to the other successor, remove it. 6293 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 6294 6295 // Compare both parts that make up the double comparison separately for 6296 // equality. 6297 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 6298 6299 unsigned LHS1 = MI->getOperand(1).getReg(); 6300 unsigned LHS2 = MI->getOperand(2).getReg(); 6301 if (RHSisZero) { 6302 AddDefaultPred(BuildMI(BB, dl, 6303 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6304 .addReg(LHS1).addImm(0)); 6305 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6306 .addReg(LHS2).addImm(0) 6307 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6308 } else { 6309 unsigned RHS1 = MI->getOperand(3).getReg(); 6310 unsigned RHS2 = MI->getOperand(4).getReg(); 6311 AddDefaultPred(BuildMI(BB, dl, 6312 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6313 .addReg(LHS1).addReg(RHS1)); 6314 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6315 .addReg(LHS2).addReg(RHS2) 6316 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 6317 } 6318 6319 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 6320 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 6321 if (MI->getOperand(0).getImm() == ARMCC::NE) 6322 std::swap(destMBB, exitMBB); 6323 6324 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6325 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 6326 if (isThumb2) 6327 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 6328 else 6329 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 6330 6331 MI->eraseFromParent(); // The pseudo instruction is gone now. 6332 return BB; 6333 } 6334 6335 case ARM::Int_eh_sjlj_setjmp: 6336 case ARM::Int_eh_sjlj_setjmp_nofp: 6337 case ARM::tInt_eh_sjlj_setjmp: 6338 case ARM::t2Int_eh_sjlj_setjmp: 6339 case ARM::t2Int_eh_sjlj_setjmp_nofp: 6340 EmitSjLjDispatchBlock(MI, BB); 6341 return BB; 6342 6343 case ARM::ABS: 6344 case ARM::t2ABS: { 6345 // To insert an ABS instruction, we have to insert the 6346 // diamond control-flow pattern. 
The incoming instruction knows the 6347 // source vreg to test against 0, the destination vreg to set, 6348 // the condition code register to branch on, the 6349 // true/false values to select between, and a branch opcode to use. 6350 // It transforms 6351 // V1 = ABS V0 6352 // into 6353 // V2 = MOVS V0 6354 // BCC (branch to SinkBB if V0 >= 0) 6355 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 6356 // SinkBB: V1 = PHI(V2, V3) 6357 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6358 MachineFunction::iterator BBI = BB; 6359 ++BBI; 6360 MachineFunction *Fn = BB->getParent(); 6361 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6362 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 6363 Fn->insert(BBI, RSBBB); 6364 Fn->insert(BBI, SinkBB); 6365 6366 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 6367 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 6368 bool isThumb2 = Subtarget->isThumb2(); 6369 MachineRegisterInfo &MRI = Fn->getRegInfo(); 6370 // In Thumb mode S must not be specified if source register is the SP or 6371 // PC and if destination register is the SP, so restrict register class 6372 unsigned NewMovDstReg = MRI.createVirtualRegister( 6373 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6374 unsigned NewRsbDstReg = MRI.createVirtualRegister( 6375 isThumb2 ? ARM::rGPRRegisterClass : ARM::GPRRegisterClass); 6376 6377 // Transfer the remainder of BB and its successor edges to sinkMBB. 6378 SinkBB->splice(SinkBB->begin(), BB, 6379 llvm::next(MachineBasicBlock::iterator(MI)), 6380 BB->end()); 6381 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 6382 6383 BB->addSuccessor(RSBBB); 6384 BB->addSuccessor(SinkBB); 6385 6386 // fall through to SinkMBB 6387 RSBBB->addSuccessor(SinkBB); 6388 6389 // insert a movs at the end of BB 6390 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVr : ARM::MOVr), 6391 NewMovDstReg) 6392 .addReg(ABSSrcReg, RegState::Kill) 6393 .addImm((unsigned)ARMCC::AL).addReg(0) 6394 .addReg(ARM::CPSR, RegState::Define); 6395 6396 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 6397 BuildMI(BB, dl, 6398 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 6399 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 6400 6401 // insert rsbri in RSBBB 6402 // Note: BCC and rsbri will be converted into predicated rsbmi 6403 // by if-conversion pass 6404 BuildMI(*RSBBB, RSBBB->begin(), dl, 6405 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 6406 .addReg(NewMovDstReg, RegState::Kill) 6407 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 6408 6409 // insert PHI in SinkBB, 6410 // reuse ABSDstReg to not change uses of ABS instruction 6411 BuildMI(*SinkBB, SinkBB->begin(), dl, 6412 TII->get(ARM::PHI), ABSDstReg) 6413 .addReg(NewRsbDstReg).addMBB(RSBBB) 6414 .addReg(NewMovDstReg).addMBB(BB); 6415 6416 // remove ABS instruction 6417 MI->eraseFromParent(); 6418 6419 // return last added BB 6420 return SinkBB; 6421 } 6422 } 6423} 6424 6425void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 6426 SDNode *Node) const { 6427 const MCInstrDesc *MCID = &MI->getDesc(); 6428 if (!MCID->hasPostISelHook()) { 6429 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 6430 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 6431 return; 6432 } 6433 6434 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 6435 // RSC. 
Coming out of isel, they have an implicit CPSR def, but the optional 6436 // operand is still set to noreg. If needed, set the optional operand's 6437 // register to CPSR, and remove the redundant implicit def. 6438 // 6439 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 6440 6441 // Rename pseudo opcodes. 6442 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 6443 if (NewOpc) { 6444 const ARMBaseInstrInfo *TII = 6445 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 6446 MCID = &TII->get(NewOpc); 6447 6448 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 6449 "converted opcode should be the same except for cc_out"); 6450 6451 MI->setDesc(*MCID); 6452 6453 // Add the optional cc_out operand 6454 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 6455 } 6456 unsigned ccOutIdx = MCID->getNumOperands() - 1; 6457 6458 // Any ARM instruction that sets the 's' bit should specify an optional 6459 // "cc_out" operand in the last operand position. 6460 if (!MCID->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 6461 assert(!NewOpc && "Optional cc_out operand required"); 6462 return; 6463 } 6464 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 6465 // since we already have an optional CPSR def. 6466 bool definesCPSR = false; 6467 bool deadCPSR = false; 6468 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 6469 i != e; ++i) { 6470 const MachineOperand &MO = MI->getOperand(i); 6471 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 6472 definesCPSR = true; 6473 if (MO.isDead()) 6474 deadCPSR = true; 6475 MI->RemoveOperand(i); 6476 break; 6477 } 6478 } 6479 if (!definesCPSR) { 6480 assert(!NewOpc && "Optional cc_out operand required"); 6481 return; 6482 } 6483 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 6484 if (deadCPSR) { 6485 assert(!MI->getOperand(ccOutIdx).getReg() && 6486 "expect uninitialized optional cc_out operand"); 6487 return; 6488 } 6489 6490 // If this instruction was defined with an optional CPSR def and its dag node 6491 // had a live implicit CPSR def, then activate the optional CPSR def. 6492 MachineOperand &MO = MI->getOperand(ccOutIdx); 6493 MO.setReg(ARM::CPSR); 6494 MO.setIsDef(true); 6495} 6496 6497//===----------------------------------------------------------------------===// 6498// ARM Optimization Hooks 6499//===----------------------------------------------------------------------===// 6500 6501static 6502SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 6503 TargetLowering::DAGCombinerInfo &DCI) { 6504 SelectionDAG &DAG = DCI.DAG; 6505 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6506 EVT VT = N->getValueType(0); 6507 unsigned Opc = N->getOpcode(); 6508 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 6509 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 6510 SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2);
6511 ISD::CondCode CC = ISD::SETCC_INVALID;
6512
6513 if (isSlctCC) {
6514 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
6515 } else {
6516 SDValue CCOp = Slct.getOperand(0);
6517 if (CCOp.getOpcode() == ISD::SETCC)
6518 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
6519 }
6520
6521 bool DoXform = false;
6522 bool InvCC = false;
6523 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
6524 "Bad input!");
6525
6526 if (LHS.getOpcode() == ISD::Constant &&
6527 cast<ConstantSDNode>(LHS)->isNullValue()) {
6528 DoXform = true;
6529 } else if (CC != ISD::SETCC_INVALID &&
6530 RHS.getOpcode() == ISD::Constant &&
6531 cast<ConstantSDNode>(RHS)->isNullValue()) {
6532 std::swap(LHS, RHS);
6533 SDValue Op0 = Slct.getOperand(0);
6534 EVT OpVT = isSlctCC ? Op0.getValueType() :
6535 Op0.getOperand(0).getValueType();
6536 bool isInt = OpVT.isInteger();
6537 CC = ISD::getSetCCInverse(CC, isInt);
6538
6539 if (!TLI.isCondCodeLegal(CC, OpVT))
6540 return SDValue(); // Inverse operator isn't legal.
6541
6542 DoXform = true;
6543 InvCC = true;
6544 }
6545
6546 if (DoXform) {
6547 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
6548 if (isSlctCC)
6549 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
6550 Slct.getOperand(0), Slct.getOperand(1), CC);
6551 SDValue CCOp = Slct.getOperand(0);
6552 if (InvCC)
6553 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
6554 CCOp.getOperand(0), CCOp.getOperand(1), CC);
6555 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
6556 CCOp, OtherOp, Result);
6557 }
6558 return SDValue();
6559}
6560
6561// AddCombineToVPADDL - For pair-wise add on NEON, use the vpaddl instruction
6562// (only after legalization).
6563static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
6564 TargetLowering::DAGCombinerInfo &DCI,
6565 const ARMSubtarget *Subtarget) {
6566
6567 // Only perform this optimization after legalization and when NEON is
6568 // available. We also expect both operands to be BUILD_VECTORs.
6569 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
6570 || N0.getOpcode() != ISD::BUILD_VECTOR
6571 || N1.getOpcode() != ISD::BUILD_VECTOR)
6572 return SDValue();
6573
6574 // Check output type since VPADDL operand elements can only be 8, 16, or 32.
6575 EVT VT = N->getValueType(0);
6576 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
6577 return SDValue();
6578
6579 // Check that the vector operands are of the right form.
6580 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
6581 // operands, where N is the size of the formed vector.
6582 // Each EXTRACT_VECTOR should have the same input vector and an odd or even
6583 // index such that we have a pair-wise add pattern.
6584
6585 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
6586 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
6587 return SDValue();
6588 SDValue Vec = N0->getOperand(0)->getOperand(0);
6589 SDNode *V = Vec.getNode();
6590 unsigned nextIndex = 0;
6591
6592 // For each operand of the ADD that is a BUILD_VECTOR,
6593 // check whether each of its operands is an EXTRACT_VECTOR with
6594 // the same vector and the appropriate index.
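  // For example, with N0 = (x[0], x[2], x[4], x[6]) and N1 = (x[1], x[3], x[5], x[7])
  // extracted from the same vector x, the ADD computes the pair-wise sums of x,
  // which VPADDL (pair-wise add long) produces directly; the widened result is
  // truncated back to the original element size below.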
6595 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
6596 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
6597 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
6598
6599 SDValue ExtVec0 = N0->getOperand(i);
6600 SDValue ExtVec1 = N1->getOperand(i);
6601
6602 // The first operand is the vector; verify it's the same.
6603 if (V != ExtVec0->getOperand(0).getNode() ||
6604 V != ExtVec1->getOperand(0).getNode())
6605 return SDValue();
6606
6607 // The second is the constant index; verify it's correct.
6608 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
6609 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
6610
6611 // For the constants, we want the next even index in N0 and odd index in N1.
6612 if (!C0 || !C1 || C0->getZExtValue() != nextIndex
6613 || C1->getZExtValue() != nextIndex+1)
6614 return SDValue();
6615
6616 // Increment index.
6617 nextIndex+=2;
6618 } else
6619 return SDValue();
6620 }
6621
6622 // Create VPADDL node.
6623 SelectionDAG &DAG = DCI.DAG;
6624 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6625
6626 // Build operand list.
6627 SmallVector<SDValue, 8> Ops;
6628 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
6629 TLI.getPointerTy()));
6630
6631 // Input is the vector.
6632 Ops.push_back(Vec);
6633
6634 // Get widened type and narrowed type.
6635 MVT widenType;
6636 unsigned numElem = VT.getVectorNumElements();
6637 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
6638 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
6639 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
6640 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
6641 default:
6642 assert(0 && "Invalid vector element type for padd optimization.");
6643 }
6644
6645 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
6646 widenType, &Ops[0], Ops.size());
6647 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp);
6648}
6649
6650/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
6651/// operands N0 and N1. This is a helper for PerformADDCombine that is
6652/// called with the default operands, and if that fails, with commuted
6653/// operands.
6654static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
6655 TargetLowering::DAGCombinerInfo &DCI,
6656 const ARMSubtarget *Subtarget){
6657
6658 // Attempt to create vpaddl for this add.
6659 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
6660 if (Result.getNode())
6661 return Result;
6662
6663 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
6664 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
6665 SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
6666 if (Result.getNode()) return Result;
6667 }
6668 return SDValue();
6669}
6670
6671/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
6672///
6673static SDValue PerformADDCombine(SDNode *N,
6674 TargetLowering::DAGCombinerInfo &DCI,
6675 const ARMSubtarget *Subtarget) {
6676 SDValue N0 = N->getOperand(0);
6677 SDValue N1 = N->getOperand(1);
6678
6679 // First try with the default operand order.
6680 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
6681 if (Result.getNode())
6682 return Result;
6683
6684 // If that didn't work, try again with the operands commuted.
6685 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 6686} 6687 6688/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 6689/// 6690static SDValue PerformSUBCombine(SDNode *N, 6691 TargetLowering::DAGCombinerInfo &DCI) { 6692 SDValue N0 = N->getOperand(0); 6693 SDValue N1 = N->getOperand(1); 6694 6695 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 6696 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 6697 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 6698 if (Result.getNode()) return Result; 6699 } 6700 6701 return SDValue(); 6702} 6703 6704/// PerformVMULCombine 6705/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 6706/// special multiplier accumulator forwarding. 6707/// vmul d3, d0, d2 6708/// vmla d3, d1, d2 6709/// is faster than 6710/// vadd d3, d0, d1 6711/// vmul d3, d3, d2 6712static SDValue PerformVMULCombine(SDNode *N, 6713 TargetLowering::DAGCombinerInfo &DCI, 6714 const ARMSubtarget *Subtarget) { 6715 if (!Subtarget->hasVMLxForwarding()) 6716 return SDValue(); 6717 6718 SelectionDAG &DAG = DCI.DAG; 6719 SDValue N0 = N->getOperand(0); 6720 SDValue N1 = N->getOperand(1); 6721 unsigned Opcode = N0.getOpcode(); 6722 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6723 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 6724 Opcode = N1.getOpcode(); 6725 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 6726 Opcode != ISD::FADD && Opcode != ISD::FSUB) 6727 return SDValue(); 6728 std::swap(N0, N1); 6729 } 6730 6731 EVT VT = N->getValueType(0); 6732 DebugLoc DL = N->getDebugLoc(); 6733 SDValue N00 = N0->getOperand(0); 6734 SDValue N01 = N0->getOperand(1); 6735 return DAG.getNode(Opcode, DL, VT, 6736 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 6737 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 6738} 6739 6740static SDValue PerformMULCombine(SDNode *N, 6741 TargetLowering::DAGCombinerInfo &DCI, 6742 const ARMSubtarget *Subtarget) { 6743 SelectionDAG &DAG = DCI.DAG; 6744 6745 if (Subtarget->isThumb1Only()) 6746 return SDValue(); 6747 6748 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 6749 return SDValue(); 6750 6751 EVT VT = N->getValueType(0); 6752 if (VT.is64BitVector() || VT.is128BitVector()) 6753 return PerformVMULCombine(N, DCI, Subtarget); 6754 if (VT != MVT::i32) 6755 return SDValue(); 6756 6757 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6758 if (!C) 6759 return SDValue(); 6760 6761 uint64_t MulAmt = C->getZExtValue(); 6762 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 6763 ShiftAmt = ShiftAmt & (32 - 1); 6764 SDValue V = N->getOperand(0); 6765 DebugLoc DL = N->getDebugLoc(); 6766 6767 SDValue Res; 6768 MulAmt >>= ShiftAmt; 6769 if (isPowerOf2_32(MulAmt - 1)) { 6770 // (mul x, 2^N + 1) => (add (shl x, N), x) 6771 Res = DAG.getNode(ISD::ADD, DL, VT, 6772 V, DAG.getNode(ISD::SHL, DL, VT, 6773 V, DAG.getConstant(Log2_32(MulAmt-1), 6774 MVT::i32))); 6775 } else if (isPowerOf2_32(MulAmt + 1)) { 6776 // (mul x, 2^N - 1) => (sub (shl x, N), x) 6777 Res = DAG.getNode(ISD::SUB, DL, VT, 6778 DAG.getNode(ISD::SHL, DL, VT, 6779 V, DAG.getConstant(Log2_32(MulAmt+1), 6780 MVT::i32)), 6781 V); 6782 } else 6783 return SDValue(); 6784 6785 if (ShiftAmt != 0) 6786 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 6787 DAG.getConstant(ShiftAmt, MVT::i32)); 6788 6789 // Do not add new nodes to DAG combiner worklist. 
6790 DCI.CombineTo(N, Res, false); 6791 return SDValue(); 6792} 6793 6794static SDValue PerformANDCombine(SDNode *N, 6795 TargetLowering::DAGCombinerInfo &DCI) { 6796 6797 // Attempt to use immediate-form VBIC 6798 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6799 DebugLoc dl = N->getDebugLoc(); 6800 EVT VT = N->getValueType(0); 6801 SelectionDAG &DAG = DCI.DAG; 6802 6803 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6804 return SDValue(); 6805 6806 APInt SplatBits, SplatUndef; 6807 unsigned SplatBitSize; 6808 bool HasAnyUndefs; 6809 if (BVN && 6810 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6811 if (SplatBitSize <= 64) { 6812 EVT VbicVT; 6813 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 6814 SplatUndef.getZExtValue(), SplatBitSize, 6815 DAG, VbicVT, VT.is128BitVector(), 6816 OtherModImm); 6817 if (Val.getNode()) { 6818 SDValue Input = 6819 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 6820 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 6821 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 6822 } 6823 } 6824 } 6825 6826 return SDValue(); 6827} 6828 6829/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 6830static SDValue PerformORCombine(SDNode *N, 6831 TargetLowering::DAGCombinerInfo &DCI, 6832 const ARMSubtarget *Subtarget) { 6833 // Attempt to use immediate-form VORR 6834 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 6835 DebugLoc dl = N->getDebugLoc(); 6836 EVT VT = N->getValueType(0); 6837 SelectionDAG &DAG = DCI.DAG; 6838 6839 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 6840 return SDValue(); 6841 6842 APInt SplatBits, SplatUndef; 6843 unsigned SplatBitSize; 6844 bool HasAnyUndefs; 6845 if (BVN && Subtarget->hasNEON() && 6846 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6847 if (SplatBitSize <= 64) { 6848 EVT VorrVT; 6849 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6850 SplatUndef.getZExtValue(), SplatBitSize, 6851 DAG, VorrVT, VT.is128BitVector(), 6852 OtherModImm); 6853 if (Val.getNode()) { 6854 SDValue Input = 6855 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 6856 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 6857 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 6858 } 6859 } 6860 } 6861 6862 SDValue N0 = N->getOperand(0); 6863 if (N0.getOpcode() != ISD::AND) 6864 return SDValue(); 6865 SDValue N1 = N->getOperand(1); 6866 6867 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 6868 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 6869 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 6870 APInt SplatUndef; 6871 unsigned SplatBitSize; 6872 bool HasAnyUndefs; 6873 6874 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 6875 APInt SplatBits0; 6876 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 6877 HasAnyUndefs) && !HasAnyUndefs) { 6878 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 6879 APInt SplatBits1; 6880 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 6881 HasAnyUndefs) && !HasAnyUndefs && 6882 SplatBits0 == ~SplatBits1) { 6883 // Canonicalize the vector type to make instruction selection simpler. 6884 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 6885 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 6886 N0->getOperand(1), N0->getOperand(0), 6887 N1->getOperand(0)); 6888 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 6889 } 6890 } 6891 } 6892 6893 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 6894 // reasonable. 6895 6896 // BFI is only available on V6T2+ 6897 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 6898 return SDValue(); 6899 6900 DebugLoc DL = N->getDebugLoc(); 6901 // 1) or (and A, mask), val => ARMbfi A, val, mask 6902 // iff (val & mask) == val 6903 // 6904 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6905 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 6906 // && mask == ~mask2 6907 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 6908 // && ~mask == mask2 6909 // (i.e., copy a bitfield value into another bitfield of the same width) 6910 6911 if (VT != MVT::i32) 6912 return SDValue(); 6913 6914 SDValue N00 = N0.getOperand(0); 6915 6916 // The value and the mask need to be constants so we can verify this is 6917 // actually a bitfield set. If the mask is 0xffff, we can do better 6918 // via a movt instruction, so don't use BFI in that case. 6919 SDValue MaskOp = N0.getOperand(1); 6920 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 6921 if (!MaskC) 6922 return SDValue(); 6923 unsigned Mask = MaskC->getZExtValue(); 6924 if (Mask == 0xffff) 6925 return SDValue(); 6926 SDValue Res; 6927 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 6928 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 6929 if (N1C) { 6930 unsigned Val = N1C->getZExtValue(); 6931 if ((Val & ~Mask) != Val) 6932 return SDValue(); 6933 6934 if (ARM::isBitFieldInvertedMask(Mask)) { 6935 Val >>= CountTrailingZeros_32(~Mask); 6936 6937 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 6938 DAG.getConstant(Val, MVT::i32), 6939 DAG.getConstant(Mask, MVT::i32)); 6940 6941 // Do not add new nodes to DAG combiner worklist. 6942 DCI.CombineTo(N, Res, false); 6943 return SDValue(); 6944 } 6945 } else if (N1.getOpcode() == ISD::AND) { 6946 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 6947 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 6948 if (!N11C) 6949 return SDValue(); 6950 unsigned Mask2 = N11C->getZExtValue(); 6951 6952 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 6953 // as is to match. 6954 if (ARM::isBitFieldInvertedMask(Mask) && 6955 (Mask == ~Mask2)) { 6956 // The pack halfword instruction works better for masks that fit it, 6957 // so use that when it's available. 6958 if (Subtarget->hasT2ExtractPack() && 6959 (Mask == 0xffff || Mask == 0xffff0000)) 6960 return SDValue(); 6961 // 2a 6962 unsigned amt = CountTrailingZeros_32(Mask2); 6963 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 6964 DAG.getConstant(amt, MVT::i32)); 6965 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 6966 DAG.getConstant(Mask, MVT::i32)); 6967 // Do not add new nodes to DAG combiner worklist. 6968 DCI.CombineTo(N, Res, false); 6969 return SDValue(); 6970 } else if (ARM::isBitFieldInvertedMask(~Mask) && 6971 (~Mask == Mask2)) { 6972 // The pack halfword instruction works better for masks that fit it, 6973 // so use that when it's available. 
6974 if (Subtarget->hasT2ExtractPack() && 6975 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 6976 return SDValue(); 6977 // 2b 6978 unsigned lsb = CountTrailingZeros_32(Mask); 6979 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 6980 DAG.getConstant(lsb, MVT::i32)); 6981 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 6982 DAG.getConstant(Mask2, MVT::i32)); 6983 // Do not add new nodes to DAG combiner worklist. 6984 DCI.CombineTo(N, Res, false); 6985 return SDValue(); 6986 } 6987 } 6988 6989 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 6990 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 6991 ARM::isBitFieldInvertedMask(~Mask)) { 6992 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 6993 // where lsb(mask) == #shamt and masked bits of B are known zero. 6994 SDValue ShAmt = N00.getOperand(1); 6995 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 6996 unsigned LSB = CountTrailingZeros_32(Mask); 6997 if (ShAmtC != LSB) 6998 return SDValue(); 6999 7000 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 7001 DAG.getConstant(~Mask, MVT::i32)); 7002 7003 // Do not add new nodes to DAG combiner worklist. 7004 DCI.CombineTo(N, Res, false); 7005 } 7006 7007 return SDValue(); 7008} 7009 7010/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 7011/// the bits being cleared by the AND are not demanded by the BFI. 7012static SDValue PerformBFICombine(SDNode *N, 7013 TargetLowering::DAGCombinerInfo &DCI) { 7014 SDValue N1 = N->getOperand(1); 7015 if (N1.getOpcode() == ISD::AND) { 7016 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 7017 if (!N11C) 7018 return SDValue(); 7019 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 7020 unsigned LSB = CountTrailingZeros_32(~InvMask); 7021 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 7022 unsigned Mask = (1 << Width)-1; 7023 unsigned Mask2 = N11C->getZExtValue(); 7024 if ((Mask & (~Mask2)) == 0) 7025 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 7026 N->getOperand(0), N1.getOperand(0), 7027 N->getOperand(2)); 7028 } 7029 return SDValue(); 7030} 7031 7032/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 7033/// ARMISD::VMOVRRD. 7034static SDValue PerformVMOVRRDCombine(SDNode *N, 7035 TargetLowering::DAGCombinerInfo &DCI) { 7036 // vmovrrd(vmovdrr x, y) -> x,y 7037 SDValue InDouble = N->getOperand(0); 7038 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 7039 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 7040 7041 // vmovrrd(load f64) -> (load i32), (load i32) 7042 SDNode *InNode = InDouble.getNode(); 7043 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 7044 InNode->getValueType(0) == MVT::f64 && 7045 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 7046 !cast<LoadSDNode>(InNode)->isVolatile()) { 7047 // TODO: Should this be done for non-FrameIndex operands? 
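  // Split the f64 load into two i32 loads, one at the base address and one at
  // offset +4, and let them replace the two GPR results of the VMOVRRD.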
7048 LoadSDNode *LD = cast<LoadSDNode>(InNode); 7049 7050 SelectionDAG &DAG = DCI.DAG; 7051 DebugLoc DL = LD->getDebugLoc(); 7052 SDValue BasePtr = LD->getBasePtr(); 7053 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 7054 LD->getPointerInfo(), LD->isVolatile(), 7055 LD->isNonTemporal(), LD->getAlignment()); 7056 7057 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7058 DAG.getConstant(4, MVT::i32)); 7059 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 7060 LD->getPointerInfo(), LD->isVolatile(), 7061 LD->isNonTemporal(), 7062 std::min(4U, LD->getAlignment() / 2)); 7063 7064 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 7065 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 7066 DCI.RemoveFromWorklist(LD); 7067 DAG.DeleteNode(LD); 7068 return Result; 7069 } 7070 7071 return SDValue(); 7072} 7073 7074/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 7075/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 7076static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 7077 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 7078 SDValue Op0 = N->getOperand(0); 7079 SDValue Op1 = N->getOperand(1); 7080 if (Op0.getOpcode() == ISD::BITCAST) 7081 Op0 = Op0.getOperand(0); 7082 if (Op1.getOpcode() == ISD::BITCAST) 7083 Op1 = Op1.getOperand(0); 7084 if (Op0.getOpcode() == ARMISD::VMOVRRD && 7085 Op0.getNode() == Op1.getNode() && 7086 Op0.getResNo() == 0 && Op1.getResNo() == 1) 7087 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 7088 N->getValueType(0), Op0.getOperand(0)); 7089 return SDValue(); 7090} 7091 7092/// PerformSTORECombine - Target-specific dag combine xforms for 7093/// ISD::STORE. 7094static SDValue PerformSTORECombine(SDNode *N, 7095 TargetLowering::DAGCombinerInfo &DCI) { 7096 // Bitcast an i64 store extracted from a vector to f64. 7097 // Otherwise, the i64 value will be legalized to a pair of i32 values. 
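  // i.e. perform the extract on a bitcast vector of f64 elements and bitcast the
  // extracted value back to i64 for the store; the new bitcasts are added to the
  // worklist so the DAG combiner can fold them away.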
7098 StoreSDNode *St = cast<StoreSDNode>(N); 7099 SDValue StVal = St->getValue(); 7100 if (!ISD::isNormalStore(St) || St->isVolatile()) 7101 return SDValue(); 7102 7103 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 7104 StVal.getNode()->hasOneUse() && !St->isVolatile()) { 7105 SelectionDAG &DAG = DCI.DAG; 7106 DebugLoc DL = St->getDebugLoc(); 7107 SDValue BasePtr = St->getBasePtr(); 7108 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 7109 StVal.getNode()->getOperand(0), BasePtr, 7110 St->getPointerInfo(), St->isVolatile(), 7111 St->isNonTemporal(), St->getAlignment()); 7112 7113 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 7114 DAG.getConstant(4, MVT::i32)); 7115 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 7116 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 7117 St->isNonTemporal(), 7118 std::min(4U, St->getAlignment() / 2)); 7119 } 7120 7121 if (StVal.getValueType() != MVT::i64 || 7122 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7123 return SDValue(); 7124 7125 SelectionDAG &DAG = DCI.DAG; 7126 DebugLoc dl = StVal.getDebugLoc(); 7127 SDValue IntVec = StVal.getOperand(0); 7128 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7129 IntVec.getValueType().getVectorNumElements()); 7130 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 7131 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7132 Vec, StVal.getOperand(1)); 7133 dl = N->getDebugLoc(); 7134 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 7135 // Make the DAGCombiner fold the bitcasts. 7136 DCI.AddToWorklist(Vec.getNode()); 7137 DCI.AddToWorklist(ExtElt.getNode()); 7138 DCI.AddToWorklist(V.getNode()); 7139 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 7140 St->getPointerInfo(), St->isVolatile(), 7141 St->isNonTemporal(), St->getAlignment(), 7142 St->getTBAAInfo()); 7143} 7144 7145/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 7146/// are normal, non-volatile loads. If so, it is profitable to bitcast an 7147/// i64 vector to have f64 elements, since the value can then be loaded 7148/// directly into a VFP register. 7149static bool hasNormalLoadOperand(SDNode *N) { 7150 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 7151 for (unsigned i = 0; i < NumElts; ++i) { 7152 SDNode *Elt = N->getOperand(i).getNode(); 7153 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 7154 return true; 7155 } 7156 return false; 7157} 7158 7159/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 7160/// ISD::BUILD_VECTOR. 7161static SDValue PerformBUILD_VECTORCombine(SDNode *N, 7162 TargetLowering::DAGCombinerInfo &DCI){ 7163 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 7164 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 7165 // into a pair of GPRs, which is fine when the value is used as a scalar, 7166 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 7167 SelectionDAG &DAG = DCI.DAG; 7168 if (N->getNumOperands() == 2) { 7169 SDValue RV = PerformVMOVDRRCombine(N, DAG); 7170 if (RV.getNode()) 7171 return RV; 7172 } 7173 7174 // Load i64 elements as f64 values so that type legalization does not split 7175 // them up into i32 values. 
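  // e.g. a v2i64 BUILD_VECTOR of loaded values becomes a v2f64 BUILD_VECTOR of
  // bitcast elements (so each element can be loaded straight into a VFP register)
  // and the result is bitcast back to the original integer vector type.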
7176 EVT VT = N->getValueType(0); 7177 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 7178 return SDValue(); 7179 DebugLoc dl = N->getDebugLoc(); 7180 SmallVector<SDValue, 8> Ops; 7181 unsigned NumElts = VT.getVectorNumElements(); 7182 for (unsigned i = 0; i < NumElts; ++i) { 7183 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 7184 Ops.push_back(V); 7185 // Make the DAGCombiner fold the bitcast. 7186 DCI.AddToWorklist(V.getNode()); 7187 } 7188 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 7189 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 7190 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 7191} 7192 7193/// PerformInsertEltCombine - Target-specific dag combine xforms for 7194/// ISD::INSERT_VECTOR_ELT. 7195static SDValue PerformInsertEltCombine(SDNode *N, 7196 TargetLowering::DAGCombinerInfo &DCI) { 7197 // Bitcast an i64 load inserted into a vector to f64. 7198 // Otherwise, the i64 value will be legalized to a pair of i32 values. 7199 EVT VT = N->getValueType(0); 7200 SDNode *Elt = N->getOperand(1).getNode(); 7201 if (VT.getVectorElementType() != MVT::i64 || 7202 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 7203 return SDValue(); 7204 7205 SelectionDAG &DAG = DCI.DAG; 7206 DebugLoc dl = N->getDebugLoc(); 7207 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 7208 VT.getVectorNumElements()); 7209 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 7210 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 7211 // Make the DAGCombiner fold the bitcasts. 7212 DCI.AddToWorklist(Vec.getNode()); 7213 DCI.AddToWorklist(V.getNode()); 7214 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 7215 Vec, V, N->getOperand(2)); 7216 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 7217} 7218 7219/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 7220/// ISD::VECTOR_SHUFFLE. 7221static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 7222 // The LLVM shufflevector instruction does not require the shuffle mask 7223 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 7224 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 7225 // operands do not match the mask length, they are extended by concatenating 7226 // them with undef vectors. That is probably the right thing for other 7227 // targets, but for NEON it is better to concatenate two double-register 7228 // size vector operands into a single quad-register size vector. Do that 7229 // transformation here: 7230 // shuffle(concat(v1, undef), concat(v2, undef)) -> 7231 // shuffle(concat(v1, v2), undef) 7232 SDValue Op0 = N->getOperand(0); 7233 SDValue Op1 = N->getOperand(1); 7234 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 7235 Op1.getOpcode() != ISD::CONCAT_VECTORS || 7236 Op0.getNumOperands() != 2 || 7237 Op1.getNumOperands() != 2) 7238 return SDValue(); 7239 SDValue Concat0Op1 = Op0.getOperand(1); 7240 SDValue Concat1Op1 = Op1.getOperand(1); 7241 if (Concat0Op1.getOpcode() != ISD::UNDEF || 7242 Concat1Op1.getOpcode() != ISD::UNDEF) 7243 return SDValue(); 7244 // Skip the transformation if any of the types are illegal. 
7245 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7246 EVT VT = N->getValueType(0); 7247 if (!TLI.isTypeLegal(VT) || 7248 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 7249 !TLI.isTypeLegal(Concat1Op1.getValueType())) 7250 return SDValue(); 7251 7252 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 7253 Op0.getOperand(0), Op1.getOperand(0)); 7254 // Translate the shuffle mask. 7255 SmallVector<int, 16> NewMask; 7256 unsigned NumElts = VT.getVectorNumElements(); 7257 unsigned HalfElts = NumElts/2; 7258 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 7259 for (unsigned n = 0; n < NumElts; ++n) { 7260 int MaskElt = SVN->getMaskElt(n); 7261 int NewElt = -1; 7262 if (MaskElt < (int)HalfElts) 7263 NewElt = MaskElt; 7264 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 7265 NewElt = HalfElts + MaskElt - NumElts; 7266 NewMask.push_back(NewElt); 7267 } 7268 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 7269 DAG.getUNDEF(VT), NewMask.data()); 7270} 7271 7272/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 7273/// NEON load/store intrinsics to merge base address updates. 7274static SDValue CombineBaseUpdate(SDNode *N, 7275 TargetLowering::DAGCombinerInfo &DCI) { 7276 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 7277 return SDValue(); 7278 7279 SelectionDAG &DAG = DCI.DAG; 7280 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 7281 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 7282 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 7283 SDValue Addr = N->getOperand(AddrOpIdx); 7284 7285 // Search for a use of the address operand that is an increment. 7286 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 7287 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 7288 SDNode *User = *UI; 7289 if (User->getOpcode() != ISD::ADD || 7290 UI.getUse().getResNo() != Addr.getResNo()) 7291 continue; 7292 7293 // Check that the add is independent of the load/store. Otherwise, folding 7294 // it would create a cycle. 7295 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 7296 continue; 7297 7298 // Find the new opcode for the updating load/store. 
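  // e.g. an arm.neon.vld2 whose address is also incremented by the size of the
  // access is replaced by a VLD2_UPD node, which additionally returns the
  // updated base address and so subsumes the separate ADD.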
7299 bool isLoad = true; 7300 bool isLaneOp = false; 7301 unsigned NewOpc = 0; 7302 unsigned NumVecs = 0; 7303 if (isIntrinsic) { 7304 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 7305 switch (IntNo) { 7306 default: assert(0 && "unexpected intrinsic for Neon base update"); 7307 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 7308 NumVecs = 1; break; 7309 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 7310 NumVecs = 2; break; 7311 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 7312 NumVecs = 3; break; 7313 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 7314 NumVecs = 4; break; 7315 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 7316 NumVecs = 2; isLaneOp = true; break; 7317 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 7318 NumVecs = 3; isLaneOp = true; break; 7319 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 7320 NumVecs = 4; isLaneOp = true; break; 7321 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 7322 NumVecs = 1; isLoad = false; break; 7323 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 7324 NumVecs = 2; isLoad = false; break; 7325 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 7326 NumVecs = 3; isLoad = false; break; 7327 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 7328 NumVecs = 4; isLoad = false; break; 7329 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 7330 NumVecs = 2; isLoad = false; isLaneOp = true; break; 7331 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 7332 NumVecs = 3; isLoad = false; isLaneOp = true; break; 7333 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 7334 NumVecs = 4; isLoad = false; isLaneOp = true; break; 7335 } 7336 } else { 7337 isLaneOp = true; 7338 switch (N->getOpcode()) { 7339 default: assert(0 && "unexpected opcode for Neon base update"); 7340 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 7341 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 7342 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 7343 } 7344 } 7345 7346 // Find the size of memory referenced by the load/store. 7347 EVT VecTy; 7348 if (isLoad) 7349 VecTy = N->getValueType(0); 7350 else 7351 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 7352 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 7353 if (isLaneOp) 7354 NumBytes /= VecTy.getVectorNumElements(); 7355 7356 // If the increment is a constant, it must match the memory ref size. 7357 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 7358 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 7359 uint64_t IncVal = CInc->getZExtValue(); 7360 if (IncVal != NumBytes) 7361 continue; 7362 } else if (NumBytes >= 3 * 16) { 7363 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 7364 // separate instructions that make it harder to use a non-constant update. 7365 continue; 7366 } 7367 7368 // Create the new updating load/store node. 7369 EVT Tys[6]; 7370 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 7371 unsigned n; 7372 for (n = 0; n < NumResultVecs; ++n) 7373 Tys[n] = VecTy; 7374 Tys[n++] = MVT::i32; 7375 Tys[n] = MVT::Other; 7376 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 7377 SmallVector<SDValue, 8> Ops; 7378 Ops.push_back(N->getOperand(0)); // incoming chain 7379 Ops.push_back(N->getOperand(AddrOpIdx)); 7380 Ops.push_back(Inc); 7381 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 7382 Ops.push_back(N->getOperand(i)); 7383 } 7384 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 7385 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 7386 Ops.data(), Ops.size(), 7387 MemInt->getMemoryVT(), 7388 MemInt->getMemOperand()); 7389 7390 // Update the uses. 7391 std::vector<SDValue> NewResults; 7392 for (unsigned i = 0; i < NumResultVecs; ++i) { 7393 NewResults.push_back(SDValue(UpdN.getNode(), i)); 7394 } 7395 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 7396 DCI.CombineTo(N, NewResults); 7397 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 7398 7399 break; 7400 } 7401 return SDValue(); 7402} 7403 7404/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 7405/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 7406/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 7407/// return true. 7408static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 7409 SelectionDAG &DAG = DCI.DAG; 7410 EVT VT = N->getValueType(0); 7411 // vldN-dup instructions only support 64-bit vectors for N > 1. 7412 if (!VT.is64BitVector()) 7413 return false; 7414 7415 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 7416 SDNode *VLD = N->getOperand(0).getNode(); 7417 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 7418 return false; 7419 unsigned NumVecs = 0; 7420 unsigned NewOpc = 0; 7421 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 7422 if (IntNo == Intrinsic::arm_neon_vld2lane) { 7423 NumVecs = 2; 7424 NewOpc = ARMISD::VLD2DUP; 7425 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 7426 NumVecs = 3; 7427 NewOpc = ARMISD::VLD3DUP; 7428 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 7429 NumVecs = 4; 7430 NewOpc = ARMISD::VLD4DUP; 7431 } else { 7432 return false; 7433 } 7434 7435 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 7436 // numbers match the load. 7437 unsigned VLDLaneNo = 7438 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 7439 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7440 UI != UE; ++UI) { 7441 // Ignore uses of the chain result. 7442 if (UI.getUse().getResNo() == NumVecs) 7443 continue; 7444 SDNode *User = *UI; 7445 if (User->getOpcode() != ARMISD::VDUPLANE || 7446 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 7447 return false; 7448 } 7449 7450 // Create the vldN-dup node. 7451 EVT Tys[5]; 7452 unsigned n; 7453 for (n = 0; n < NumVecs; ++n) 7454 Tys[n] = VT; 7455 Tys[n] = MVT::Other; 7456 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 7457 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 7458 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 7459 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 7460 Ops, 2, VLDMemInt->getMemoryVT(), 7461 VLDMemInt->getMemOperand()); 7462 7463 // Update the uses. 
7464 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 7465 UI != UE; ++UI) { 7466 unsigned ResNo = UI.getUse().getResNo(); 7467 // Ignore uses of the chain result. 7468 if (ResNo == NumVecs) 7469 continue; 7470 SDNode *User = *UI; 7471 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 7472 } 7473 7474 // Now the vldN-lane intrinsic is dead except for its chain result. 7475 // Update uses of the chain. 7476 std::vector<SDValue> VLDDupResults; 7477 for (unsigned n = 0; n < NumVecs; ++n) 7478 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 7479 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 7480 DCI.CombineTo(VLD, VLDDupResults); 7481 7482 return true; 7483} 7484 7485/// PerformVDUPLANECombine - Target-specific dag combine xforms for 7486/// ARMISD::VDUPLANE. 7487static SDValue PerformVDUPLANECombine(SDNode *N, 7488 TargetLowering::DAGCombinerInfo &DCI) { 7489 SDValue Op = N->getOperand(0); 7490 7491 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 7492 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 7493 if (CombineVLDDUP(N, DCI)) 7494 return SDValue(N, 0); 7495 7496 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 7497 // redundant. Ignore bit_converts for now; element sizes are checked below. 7498 while (Op.getOpcode() == ISD::BITCAST) 7499 Op = Op.getOperand(0); 7500 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 7501 return SDValue(); 7502 7503 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 7504 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 7505 // The canonical VMOV for a zero vector uses a 32-bit element size. 7506 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 7507 unsigned EltBits; 7508 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 7509 EltSize = 8; 7510 EVT VT = N->getValueType(0); 7511 if (EltSize > VT.getVectorElementType().getSizeInBits()) 7512 return SDValue(); 7513 7514 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 7515} 7516 7517// isConstVecPow2 - Return true if each vector element is a power of 2, all 7518// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 7519static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 7520{ 7521 integerPart cN; 7522 integerPart c0 = 0; 7523 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 7524 I != E; I++) { 7525 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 7526 if (!C) 7527 return false; 7528 7529 bool isExact; 7530 APFloat APF = C->getValueAPF(); 7531 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 7532 != APFloat::opOK || !isExact) 7533 return false; 7534 7535 c0 = (I == 0) ? cN : c0; 7536 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 7537 return false; 7538 } 7539 C = c0; 7540 return true; 7541} 7542 7543/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 7544/// can replace combinations of VMUL and VCVT (floating-point to integer) 7545/// when the VMUL has a constant operand that is a power of 2. 
7546/// 7547/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7548/// vmul.f32 d16, d17, d16 7549/// vcvt.s32.f32 d16, d16 7550/// becomes: 7551/// vcvt.s32.f32 d16, d16, #3 7552static SDValue PerformVCVTCombine(SDNode *N, 7553 TargetLowering::DAGCombinerInfo &DCI, 7554 const ARMSubtarget *Subtarget) { 7555 SelectionDAG &DAG = DCI.DAG; 7556 SDValue Op = N->getOperand(0); 7557 7558 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 7559 Op.getOpcode() != ISD::FMUL) 7560 return SDValue(); 7561 7562 uint64_t C; 7563 SDValue N0 = Op->getOperand(0); 7564 SDValue ConstVec = Op->getOperand(1); 7565 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 7566 7567 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7568 !isConstVecPow2(ConstVec, isSigned, C)) 7569 return SDValue(); 7570 7571 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 7572 Intrinsic::arm_neon_vcvtfp2fxu; 7573 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7574 N->getValueType(0), 7575 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 7576 DAG.getConstant(Log2_64(C), MVT::i32)); 7577} 7578 7579/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 7580/// can replace combinations of VCVT (integer to floating-point) and VDIV 7581/// when the VDIV has a constant operand that is a power of 2. 7582/// 7583/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 7584/// vcvt.f32.s32 d16, d16 7585/// vdiv.f32 d16, d17, d16 7586/// becomes: 7587/// vcvt.f32.s32 d16, d16, #3 7588static SDValue PerformVDIVCombine(SDNode *N, 7589 TargetLowering::DAGCombinerInfo &DCI, 7590 const ARMSubtarget *Subtarget) { 7591 SelectionDAG &DAG = DCI.DAG; 7592 SDValue Op = N->getOperand(0); 7593 unsigned OpOpcode = Op.getNode()->getOpcode(); 7594 7595 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 7596 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 7597 return SDValue(); 7598 7599 uint64_t C; 7600 SDValue ConstVec = N->getOperand(1); 7601 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 7602 7603 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 7604 !isConstVecPow2(ConstVec, isSigned, C)) 7605 return SDValue(); 7606 7607 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 7608 Intrinsic::arm_neon_vcvtfxu2fp; 7609 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7610 Op.getValueType(), 7611 DAG.getConstant(IntrinsicOpcode, MVT::i32), 7612 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 7613} 7614 7615/// Getvshiftimm - Check if this is a valid build_vector for the immediate 7616/// operand of a vector shift operation, where all the elements of the 7617/// build_vector must have the same constant integer value. 7618static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 7619 // Ignore bit_converts. 7620 while (Op.getOpcode() == ISD::BITCAST) 7621 Op = Op.getOperand(0); 7622 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 7623 APInt SplatBits, SplatUndef; 7624 unsigned SplatBitSize; 7625 bool HasAnyUndefs; 7626 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 7627 HasAnyUndefs, ElementBits) || 7628 SplatBitSize > ElementBits) 7629 return false; 7630 Cnt = SplatBits.getSExtValue(); 7631 return true; 7632} 7633 7634/// isVShiftLImm - Check if this is a valid build_vector for the immediate 7635/// operand of a vector shift left operation. 
That value must be in the range: 7636/// 0 <= Value < ElementBits for a left shift; or 7637/// 0 <= Value <= ElementBits for a long left shift. 7638static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 7639 assert(VT.isVector() && "vector shift count is not a vector type"); 7640 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7641 if (! getVShiftImm(Op, ElementBits, Cnt)) 7642 return false; 7643 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 7644} 7645 7646/// isVShiftRImm - Check if this is a valid build_vector for the immediate 7647/// operand of a vector shift right operation. For a shift opcode, the value 7648/// is positive, but for an intrinsic the value count must be negative. The 7649/// absolute value must be in the range: 7650/// 1 <= |Value| <= ElementBits for a right shift; or 7651/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 7652static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 7653 int64_t &Cnt) { 7654 assert(VT.isVector() && "vector shift count is not a vector type"); 7655 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 7656 if (! getVShiftImm(Op, ElementBits, Cnt)) 7657 return false; 7658 if (isIntrinsic) 7659 Cnt = -Cnt; 7660 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 7661} 7662 7663/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 7664static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 7665 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 7666 switch (IntNo) { 7667 default: 7668 // Don't do anything for most intrinsics. 7669 break; 7670 7671 // Vector shifts: check for immediate versions and lower them. 7672 // Note: This is done during DAG combining instead of DAG legalizing because 7673 // the build_vectors for 64-bit vector element shift counts are generally 7674 // not legal, and it is hard to see their values after they get legalized to 7675 // loads from a constant pool. 7676 case Intrinsic::arm_neon_vshifts: 7677 case Intrinsic::arm_neon_vshiftu: 7678 case Intrinsic::arm_neon_vshiftls: 7679 case Intrinsic::arm_neon_vshiftlu: 7680 case Intrinsic::arm_neon_vshiftn: 7681 case Intrinsic::arm_neon_vrshifts: 7682 case Intrinsic::arm_neon_vrshiftu: 7683 case Intrinsic::arm_neon_vrshiftn: 7684 case Intrinsic::arm_neon_vqshifts: 7685 case Intrinsic::arm_neon_vqshiftu: 7686 case Intrinsic::arm_neon_vqshiftsu: 7687 case Intrinsic::arm_neon_vqshiftns: 7688 case Intrinsic::arm_neon_vqshiftnu: 7689 case Intrinsic::arm_neon_vqshiftnsu: 7690 case Intrinsic::arm_neon_vqrshiftns: 7691 case Intrinsic::arm_neon_vqrshiftnu: 7692 case Intrinsic::arm_neon_vqrshiftnsu: { 7693 EVT VT = N->getOperand(1).getValueType(); 7694 int64_t Cnt; 7695 unsigned VShiftOpc = 0; 7696 7697 switch (IntNo) { 7698 case Intrinsic::arm_neon_vshifts: 7699 case Intrinsic::arm_neon_vshiftu: 7700 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 7701 VShiftOpc = ARMISD::VSHL; 7702 break; 7703 } 7704 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 7705 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
7706 ARMISD::VSHRs : ARMISD::VSHRu); 7707 break; 7708 } 7709 return SDValue(); 7710 7711 case Intrinsic::arm_neon_vshiftls: 7712 case Intrinsic::arm_neon_vshiftlu: 7713 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 7714 break; 7715 llvm_unreachable("invalid shift count for vshll intrinsic"); 7716 7717 case Intrinsic::arm_neon_vrshifts: 7718 case Intrinsic::arm_neon_vrshiftu: 7719 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 7720 break; 7721 return SDValue(); 7722 7723 case Intrinsic::arm_neon_vqshifts: 7724 case Intrinsic::arm_neon_vqshiftu: 7725 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7726 break; 7727 return SDValue(); 7728 7729 case Intrinsic::arm_neon_vqshiftsu: 7730 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 7731 break; 7732 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 7733 7734 case Intrinsic::arm_neon_vshiftn: 7735 case Intrinsic::arm_neon_vrshiftn: 7736 case Intrinsic::arm_neon_vqshiftns: 7737 case Intrinsic::arm_neon_vqshiftnu: 7738 case Intrinsic::arm_neon_vqshiftnsu: 7739 case Intrinsic::arm_neon_vqrshiftns: 7740 case Intrinsic::arm_neon_vqrshiftnu: 7741 case Intrinsic::arm_neon_vqrshiftnsu: 7742 // Narrowing shifts require an immediate right shift. 7743 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 7744 break; 7745 llvm_unreachable("invalid shift count for narrowing vector shift " 7746 "intrinsic"); 7747 7748 default: 7749 llvm_unreachable("unhandled vector shift"); 7750 } 7751 7752 switch (IntNo) { 7753 case Intrinsic::arm_neon_vshifts: 7754 case Intrinsic::arm_neon_vshiftu: 7755 // Opcode already set above. 7756 break; 7757 case Intrinsic::arm_neon_vshiftls: 7758 case Intrinsic::arm_neon_vshiftlu: 7759 if (Cnt == VT.getVectorElementType().getSizeInBits()) 7760 VShiftOpc = ARMISD::VSHLLi; 7761 else 7762 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
7763 ARMISD::VSHLLs : ARMISD::VSHLLu); 7764 break; 7765 case Intrinsic::arm_neon_vshiftn: 7766 VShiftOpc = ARMISD::VSHRN; break; 7767 case Intrinsic::arm_neon_vrshifts: 7768 VShiftOpc = ARMISD::VRSHRs; break; 7769 case Intrinsic::arm_neon_vrshiftu: 7770 VShiftOpc = ARMISD::VRSHRu; break; 7771 case Intrinsic::arm_neon_vrshiftn: 7772 VShiftOpc = ARMISD::VRSHRN; break; 7773 case Intrinsic::arm_neon_vqshifts: 7774 VShiftOpc = ARMISD::VQSHLs; break; 7775 case Intrinsic::arm_neon_vqshiftu: 7776 VShiftOpc = ARMISD::VQSHLu; break; 7777 case Intrinsic::arm_neon_vqshiftsu: 7778 VShiftOpc = ARMISD::VQSHLsu; break; 7779 case Intrinsic::arm_neon_vqshiftns: 7780 VShiftOpc = ARMISD::VQSHRNs; break; 7781 case Intrinsic::arm_neon_vqshiftnu: 7782 VShiftOpc = ARMISD::VQSHRNu; break; 7783 case Intrinsic::arm_neon_vqshiftnsu: 7784 VShiftOpc = ARMISD::VQSHRNsu; break; 7785 case Intrinsic::arm_neon_vqrshiftns: 7786 VShiftOpc = ARMISD::VQRSHRNs; break; 7787 case Intrinsic::arm_neon_vqrshiftnu: 7788 VShiftOpc = ARMISD::VQRSHRNu; break; 7789 case Intrinsic::arm_neon_vqrshiftnsu: 7790 VShiftOpc = ARMISD::VQRSHRNsu; break; 7791 } 7792 7793 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7794 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 7795 } 7796 7797 case Intrinsic::arm_neon_vshiftins: { 7798 EVT VT = N->getOperand(1).getValueType(); 7799 int64_t Cnt; 7800 unsigned VShiftOpc = 0; 7801 7802 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 7803 VShiftOpc = ARMISD::VSLI; 7804 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 7805 VShiftOpc = ARMISD::VSRI; 7806 else { 7807 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 7808 } 7809 7810 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 7811 N->getOperand(1), N->getOperand(2), 7812 DAG.getConstant(Cnt, MVT::i32)); 7813 } 7814 7815 case Intrinsic::arm_neon_vqrshifts: 7816 case Intrinsic::arm_neon_vqrshiftu: 7817 // No immediate versions of these to check for. 7818 break; 7819 } 7820 7821 return SDValue(); 7822} 7823 7824/// PerformShiftCombine - Checks for immediate versions of vector shifts and 7825/// lowers them. As with the vector shift intrinsics, this is done during DAG 7826/// combining instead of DAG legalizing because the build_vectors for 64-bit 7827/// vector element shift counts are generally not legal, and it is hard to see 7828/// their values after they get legalized to loads from a constant pool. 7829static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 7830 const ARMSubtarget *ST) { 7831 EVT VT = N->getValueType(0); 7832 7833 // Nothing to be done for scalar shifts. 7834 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7835 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 7836 return SDValue(); 7837 7838 assert(ST->hasNEON() && "unexpected vector shift"); 7839 int64_t Cnt; 7840 7841 switch (N->getOpcode()) { 7842 default: llvm_unreachable("unexpected shift opcode"); 7843 7844 case ISD::SHL: 7845 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 7846 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 7847 DAG.getConstant(Cnt, MVT::i32)); 7848 break; 7849 7850 case ISD::SRA: 7851 case ISD::SRL: 7852 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 7853 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
7854 ARMISD::VSHRs : ARMISD::VSHRu); 7855 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 7856 DAG.getConstant(Cnt, MVT::i32)); 7857 } 7858 } 7859 return SDValue(); 7860} 7861 7862/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 7863/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 7864static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 7865 const ARMSubtarget *ST) { 7866 SDValue N0 = N->getOperand(0); 7867 7868 // Check for sign- and zero-extensions of vector extract operations of 8- 7869 // and 16-bit vector elements. NEON supports these directly. They are 7870 // handled during DAG combining because type legalization will promote them 7871 // to 32-bit types and it is messy to recognize the operations after that. 7872 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7873 SDValue Vec = N0.getOperand(0); 7874 SDValue Lane = N0.getOperand(1); 7875 EVT VT = N->getValueType(0); 7876 EVT EltVT = N0.getValueType(); 7877 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7878 7879 if (VT == MVT::i32 && 7880 (EltVT == MVT::i8 || EltVT == MVT::i16) && 7881 TLI.isTypeLegal(Vec.getValueType()) && 7882 isa<ConstantSDNode>(Lane)) { 7883 7884 unsigned Opc = 0; 7885 switch (N->getOpcode()) { 7886 default: llvm_unreachable("unexpected opcode"); 7887 case ISD::SIGN_EXTEND: 7888 Opc = ARMISD::VGETLANEs; 7889 break; 7890 case ISD::ZERO_EXTEND: 7891 case ISD::ANY_EXTEND: 7892 Opc = ARMISD::VGETLANEu; 7893 break; 7894 } 7895 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 7896 } 7897 } 7898 7899 return SDValue(); 7900} 7901 7902/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 7903/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 7904static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 7905 const ARMSubtarget *ST) { 7906 // If the target supports NEON, try to use vmax/vmin instructions for f32 7907 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 7908 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 7909 // a NaN; only do the transformation when it matches that behavior. 7910 7911 // For now only do this when using NEON for FP operations; if using VFP, it 7912 // is not obvious that the benefit outweighs the cost of switching to the 7913 // NEON pipeline. 7914 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 7915 N->getValueType(0) != MVT::f32) 7916 return SDValue(); 7917 7918 SDValue CondLHS = N->getOperand(0); 7919 SDValue CondRHS = N->getOperand(1); 7920 SDValue LHS = N->getOperand(2); 7921 SDValue RHS = N->getOperand(3); 7922 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 7923 7924 unsigned Opcode = 0; 7925 bool IsReversed; 7926 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 7927 IsReversed = false; // x CC y ? x : y 7928 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 7929 IsReversed = true ; // x CC y ? y : x 7930 } else { 7931 return SDValue(); 7932 } 7933 7934 bool IsUnordered; 7935 switch (CC) { 7936 default: break; 7937 case ISD::SETOLT: 7938 case ISD::SETOLE: 7939 case ISD::SETLT: 7940 case ISD::SETLE: 7941 case ISD::SETULT: 7942 case ISD::SETULE: 7943 // If LHS is NaN, an ordered comparison will be false and the result will 7944 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 7945 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
7946 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 7947 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7948 break; 7949 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 7950 // will return -0, so vmin can only be used for unsafe math or if one of 7951 // the operands is known to be nonzero. 7952 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 7953 !UnsafeFPMath && 7954 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7955 break; 7956 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 7957 break; 7958 7959 case ISD::SETOGT: 7960 case ISD::SETOGE: 7961 case ISD::SETGT: 7962 case ISD::SETGE: 7963 case ISD::SETUGT: 7964 case ISD::SETUGE: 7965 // If LHS is NaN, an ordered comparison will be false and the result will 7966 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 7967 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 7968 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 7969 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 7970 break; 7971 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 7972 // will return +0, so vmax can only be used for unsafe math or if one of 7973 // the operands is known to be nonzero. 7974 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 7975 !UnsafeFPMath && 7976 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 7977 break; 7978 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 7979 break; 7980 } 7981 7982 if (!Opcode) 7983 return SDValue(); 7984 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 7985} 7986 7987/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 7988SDValue 7989ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 7990 SDValue Cmp = N->getOperand(4); 7991 if (Cmp.getOpcode() != ARMISD::CMPZ) 7992 // Only looking at EQ and NE cases. 7993 return SDValue(); 7994 7995 EVT VT = N->getValueType(0); 7996 DebugLoc dl = N->getDebugLoc(); 7997 SDValue LHS = Cmp.getOperand(0); 7998 SDValue RHS = Cmp.getOperand(1); 7999 SDValue FalseVal = N->getOperand(0); 8000 SDValue TrueVal = N->getOperand(1); 8001 SDValue ARMcc = N->getOperand(2); 8002 ARMCC::CondCodes CC = 8003 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 8004 8005 // Simplify 8006 // mov r1, r0 8007 // cmp r1, x 8008 // mov r0, y 8009 // moveq r0, x 8010 // to 8011 // cmp r0, x 8012 // movne r0, y 8013 // 8014 // mov r1, r0 8015 // cmp r1, x 8016 // mov r0, x 8017 // movne r0, y 8018 // to 8019 // cmp r0, x 8020 // movne r0, y 8021 /// FIXME: Turn this into a target neutral optimization? 8022 SDValue Res; 8023 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 8024 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 8025 N->getOperand(3), Cmp); 8026 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 8027 SDValue ARMcc; 8028 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 8029 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 8030 N->getOperand(3), NewCmp); 8031 } 8032 8033 if (Res.getNode()) { 8034 APInt KnownZero, KnownOne; 8035 APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()); 8036 DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne); 8037 // Capture demanded bits information that would be otherwise lost. 
8038 if (KnownZero == 0xfffffffe) 8039 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8040 DAG.getValueType(MVT::i1)); 8041 else if (KnownZero == 0xffffff00) 8042 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8043 DAG.getValueType(MVT::i8)); 8044 else if (KnownZero == 0xffff0000) 8045 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 8046 DAG.getValueType(MVT::i16)); 8047 } 8048 8049 return Res; 8050} 8051 8052SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 8053 DAGCombinerInfo &DCI) const { 8054 switch (N->getOpcode()) { 8055 default: break; 8056 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 8057 case ISD::SUB: return PerformSUBCombine(N, DCI); 8058 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 8059 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 8060 case ISD::AND: return PerformANDCombine(N, DCI); 8061 case ARMISD::BFI: return PerformBFICombine(N, DCI); 8062 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 8063 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 8064 case ISD::STORE: return PerformSTORECombine(N, DCI); 8065 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 8066 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 8067 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 8068 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 8069 case ISD::FP_TO_SINT: 8070 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 8071 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 8072 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 8073 case ISD::SHL: 8074 case ISD::SRA: 8075 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 8076 case ISD::SIGN_EXTEND: 8077 case ISD::ZERO_EXTEND: 8078 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 8079 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 8080 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 8081 case ARMISD::VLD2DUP: 8082 case ARMISD::VLD3DUP: 8083 case ARMISD::VLD4DUP: 8084 return CombineBaseUpdate(N, DCI); 8085 case ISD::INTRINSIC_VOID: 8086 case ISD::INTRINSIC_W_CHAIN: 8087 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 8088 case Intrinsic::arm_neon_vld1: 8089 case Intrinsic::arm_neon_vld2: 8090 case Intrinsic::arm_neon_vld3: 8091 case Intrinsic::arm_neon_vld4: 8092 case Intrinsic::arm_neon_vld2lane: 8093 case Intrinsic::arm_neon_vld3lane: 8094 case Intrinsic::arm_neon_vld4lane: 8095 case Intrinsic::arm_neon_vst1: 8096 case Intrinsic::arm_neon_vst2: 8097 case Intrinsic::arm_neon_vst3: 8098 case Intrinsic::arm_neon_vst4: 8099 case Intrinsic::arm_neon_vst2lane: 8100 case Intrinsic::arm_neon_vst3lane: 8101 case Intrinsic::arm_neon_vst4lane: 8102 return CombineBaseUpdate(N, DCI); 8103 default: break; 8104 } 8105 break; 8106 } 8107 return SDValue(); 8108} 8109 8110bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 8111 EVT VT) const { 8112 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 8113} 8114 8115bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 8116 if (!Subtarget->allowsUnalignedMem()) 8117 return false; 8118 8119 switch (VT.getSimpleVT().SimpleTy) { 8120 default: 8121 return false; 8122 case MVT::i8: 8123 case MVT::i16: 8124 case MVT::i32: 8125 return true; 8126 // FIXME: VLD1 etc with standard alignment is legal. 
8127 } 8128} 8129 8130static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 8131 if (V < 0) 8132 return false; 8133 8134 unsigned Scale = 1; 8135 switch (VT.getSimpleVT().SimpleTy) { 8136 default: return false; 8137 case MVT::i1: 8138 case MVT::i8: 8139 // Scale == 1; 8140 break; 8141 case MVT::i16: 8142 // Scale == 2; 8143 Scale = 2; 8144 break; 8145 case MVT::i32: 8146 // Scale == 4; 8147 Scale = 4; 8148 break; 8149 } 8150 8151 if ((V & (Scale - 1)) != 0) 8152 return false; 8153 V /= Scale; 8154 return V == (V & ((1LL << 5) - 1)); 8155} 8156 8157static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 8158 const ARMSubtarget *Subtarget) { 8159 bool isNeg = false; 8160 if (V < 0) { 8161 isNeg = true; 8162 V = - V; 8163 } 8164 8165 switch (VT.getSimpleVT().SimpleTy) { 8166 default: return false; 8167 case MVT::i1: 8168 case MVT::i8: 8169 case MVT::i16: 8170 case MVT::i32: 8171 // + imm12 or - imm8 8172 if (isNeg) 8173 return V == (V & ((1LL << 8) - 1)); 8174 return V == (V & ((1LL << 12) - 1)); 8175 case MVT::f32: 8176 case MVT::f64: 8177 // Same as ARM mode. FIXME: NEON? 8178 if (!Subtarget->hasVFP2()) 8179 return false; 8180 if ((V & 3) != 0) 8181 return false; 8182 V >>= 2; 8183 return V == (V & ((1LL << 8) - 1)); 8184 } 8185} 8186 8187/// isLegalAddressImmediate - Return true if the integer value can be used 8188/// as the offset of the target addressing mode for load / store of the 8189/// given type. 8190static bool isLegalAddressImmediate(int64_t V, EVT VT, 8191 const ARMSubtarget *Subtarget) { 8192 if (V == 0) 8193 return true; 8194 8195 if (!VT.isSimple()) 8196 return false; 8197 8198 if (Subtarget->isThumb1Only()) 8199 return isLegalT1AddressImmediate(V, VT); 8200 else if (Subtarget->isThumb2()) 8201 return isLegalT2AddressImmediate(V, VT, Subtarget); 8202 8203 // ARM mode. 8204 if (V < 0) 8205 V = - V; 8206 switch (VT.getSimpleVT().SimpleTy) { 8207 default: return false; 8208 case MVT::i1: 8209 case MVT::i8: 8210 case MVT::i32: 8211 // +- imm12 8212 return V == (V & ((1LL << 12) - 1)); 8213 case MVT::i16: 8214 // +- imm8 8215 return V == (V & ((1LL << 8) - 1)); 8216 case MVT::f32: 8217 case MVT::f64: 8218 if (!Subtarget->hasVFP2()) // FIXME: NEON? 8219 return false; 8220 if ((V & 3) != 0) 8221 return false; 8222 V >>= 2; 8223 return V == (V & ((1LL << 8) - 1)); 8224 } 8225} 8226 8227bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 8228 EVT VT) const { 8229 int Scale = AM.Scale; 8230 if (Scale < 0) 8231 return false; 8232 8233 switch (VT.getSimpleVT().SimpleTy) { 8234 default: return false; 8235 case MVT::i1: 8236 case MVT::i8: 8237 case MVT::i16: 8238 case MVT::i32: 8239 if (Scale == 1) 8240 return true; 8241 // r + r << imm 8242 Scale = Scale & ~1; 8243 return Scale == 2 || Scale == 4 || Scale == 8; 8244 case MVT::i64: 8245 // r + r 8246 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8247 return true; 8248 return false; 8249 case MVT::isVoid: 8250 // Note, we allow "void" uses (basically, uses that aren't loads or 8251 // stores), because arm allows folding a scale into many arithmetic 8252 // operations. This should be made more precise and revisited later. 8253 8254 // Allow r << imm, but the imm has to be a multiple of two. 8255 if (Scale & 1) return false; 8256 return isPowerOf2_32(Scale); 8257 } 8258} 8259 8260/// isLegalAddressingMode - Return true if the addressing mode represented 8261/// by AM is legal for this target, for a load/store of the specified type. 
8262bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 8263 Type *Ty) const { 8264 EVT VT = getValueType(Ty, true); 8265 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 8266 return false; 8267 8268 // Can never fold addr of global into load/store. 8269 if (AM.BaseGV) 8270 return false; 8271 8272 switch (AM.Scale) { 8273 case 0: // no scale reg, must be "r+i" or "r", or "i". 8274 break; 8275 case 1: 8276 if (Subtarget->isThumb1Only()) 8277 return false; 8278 // FALL THROUGH. 8279 default: 8280 // ARM doesn't support any R+R*scale+imm addr modes. 8281 if (AM.BaseOffs) 8282 return false; 8283 8284 if (!VT.isSimple()) 8285 return false; 8286 8287 if (Subtarget->isThumb2()) 8288 return isLegalT2ScaledAddressingMode(AM, VT); 8289 8290 int Scale = AM.Scale; 8291 switch (VT.getSimpleVT().SimpleTy) { 8292 default: return false; 8293 case MVT::i1: 8294 case MVT::i8: 8295 case MVT::i32: 8296 if (Scale < 0) Scale = -Scale; 8297 if (Scale == 1) 8298 return true; 8299 // r + r << imm 8300 return isPowerOf2_32(Scale & ~1); 8301 case MVT::i16: 8302 case MVT::i64: 8303 // r + r 8304 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 8305 return true; 8306 return false; 8307 8308 case MVT::isVoid: 8309 // Note, we allow "void" uses (basically, uses that aren't loads or 8310 // stores), because arm allows folding a scale into many arithmetic 8311 // operations. This should be made more precise and revisited later. 8312 8313 // Allow r << imm, but the imm has to be a multiple of two. 8314 if (Scale & 1) return false; 8315 return isPowerOf2_32(Scale); 8316 } 8317 break; 8318 } 8319 return true; 8320} 8321 8322/// isLegalICmpImmediate - Return true if the specified immediate is legal 8323/// icmp immediate, that is the target has icmp instructions which can compare 8324/// a register against the immediate without having to materialize the 8325/// immediate into a register. 8326bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 8327 if (!Subtarget->isThumb()) 8328 return ARM_AM::getSOImmVal(Imm) != -1; 8329 if (Subtarget->isThumb2()) 8330 return ARM_AM::getT2SOImmVal(Imm) != -1; 8331 return Imm >= 0 && Imm <= 255; 8332} 8333 8334/// isLegalAddImmediate - Return true if the specified immediate is legal 8335/// add immediate, that is the target has add instructions which can add 8336/// a register with the immediate without having to materialize the 8337/// immediate into a register. 
8338bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 8339 return ARM_AM::getSOImmVal(Imm) != -1; 8340} 8341 8342static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 8343 bool isSEXTLoad, SDValue &Base, 8344 SDValue &Offset, bool &isInc, 8345 SelectionDAG &DAG) { 8346 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8347 return false; 8348 8349 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 8350 // AddressingMode 3 8351 Base = Ptr->getOperand(0); 8352 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8353 int RHSC = (int)RHS->getZExtValue(); 8354 if (RHSC < 0 && RHSC > -256) { 8355 assert(Ptr->getOpcode() == ISD::ADD); 8356 isInc = false; 8357 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8358 return true; 8359 } 8360 } 8361 isInc = (Ptr->getOpcode() == ISD::ADD); 8362 Offset = Ptr->getOperand(1); 8363 return true; 8364 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 8365 // AddressingMode 2 8366 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8367 int RHSC = (int)RHS->getZExtValue(); 8368 if (RHSC < 0 && RHSC > -0x1000) { 8369 assert(Ptr->getOpcode() == ISD::ADD); 8370 isInc = false; 8371 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8372 Base = Ptr->getOperand(0); 8373 return true; 8374 } 8375 } 8376 8377 if (Ptr->getOpcode() == ISD::ADD) { 8378 isInc = true; 8379 ARM_AM::ShiftOpc ShOpcVal= 8380 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 8381 if (ShOpcVal != ARM_AM::no_shift) { 8382 Base = Ptr->getOperand(1); 8383 Offset = Ptr->getOperand(0); 8384 } else { 8385 Base = Ptr->getOperand(0); 8386 Offset = Ptr->getOperand(1); 8387 } 8388 return true; 8389 } 8390 8391 isInc = (Ptr->getOpcode() == ISD::ADD); 8392 Base = Ptr->getOperand(0); 8393 Offset = Ptr->getOperand(1); 8394 return true; 8395 } 8396 8397 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 8398 return false; 8399} 8400 8401static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 8402 bool isSEXTLoad, SDValue &Base, 8403 SDValue &Offset, bool &isInc, 8404 SelectionDAG &DAG) { 8405 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 8406 return false; 8407 8408 Base = Ptr->getOperand(0); 8409 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 8410 int RHSC = (int)RHS->getZExtValue(); 8411 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 8412 assert(Ptr->getOpcode() == ISD::ADD); 8413 isInc = false; 8414 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 8415 return true; 8416 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 8417 isInc = Ptr->getOpcode() == ISD::ADD; 8418 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 8419 return true; 8420 } 8421 } 8422 8423 return false; 8424} 8425 8426/// getPreIndexedAddressParts - returns true by value, base pointer and 8427/// offset pointer and addressing mode by reference if the node's address 8428/// can be legally represented as pre-indexed load / store address. 
8429bool 8430ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 8431 SDValue &Offset, 8432 ISD::MemIndexedMode &AM, 8433 SelectionDAG &DAG) const { 8434 if (Subtarget->isThumb1Only()) 8435 return false; 8436 8437 EVT VT; 8438 SDValue Ptr; 8439 bool isSEXTLoad = false; 8440 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8441 Ptr = LD->getBasePtr(); 8442 VT = LD->getMemoryVT(); 8443 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8444 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8445 Ptr = ST->getBasePtr(); 8446 VT = ST->getMemoryVT(); 8447 } else 8448 return false; 8449 8450 bool isInc; 8451 bool isLegal = false; 8452 if (Subtarget->isThumb2()) 8453 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8454 Offset, isInc, DAG); 8455 else 8456 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 8457 Offset, isInc, DAG); 8458 if (!isLegal) 8459 return false; 8460 8461 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 8462 return true; 8463} 8464 8465/// getPostIndexedAddressParts - returns true by value, base pointer and 8466/// offset pointer and addressing mode by reference if this node can be 8467/// combined with a load / store to form a post-indexed load / store. 8468bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 8469 SDValue &Base, 8470 SDValue &Offset, 8471 ISD::MemIndexedMode &AM, 8472 SelectionDAG &DAG) const { 8473 if (Subtarget->isThumb1Only()) 8474 return false; 8475 8476 EVT VT; 8477 SDValue Ptr; 8478 bool isSEXTLoad = false; 8479 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8480 VT = LD->getMemoryVT(); 8481 Ptr = LD->getBasePtr(); 8482 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 8483 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 8484 VT = ST->getMemoryVT(); 8485 Ptr = ST->getBasePtr(); 8486 } else 8487 return false; 8488 8489 bool isInc; 8490 bool isLegal = false; 8491 if (Subtarget->isThumb2()) 8492 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8493 isInc, DAG); 8494 else 8495 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 8496 isInc, DAG); 8497 if (!isLegal) 8498 return false; 8499 8500 if (Ptr != Base) { 8501 // Swap base ptr and offset to catch more post-index load / store when 8502 // it's legal. In Thumb2 mode, offset must be an immediate. 8503 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 8504 !Subtarget->isThumb2()) 8505 std::swap(Base, Offset); 8506 8507 // Post-indexed load / store update the base pointer. 8508 if (Ptr != Base) 8509 return false; 8510 } 8511 8512 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 8513 return true; 8514} 8515 8516void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 8517 const APInt &Mask, 8518 APInt &KnownZero, 8519 APInt &KnownOne, 8520 const SelectionDAG &DAG, 8521 unsigned Depth) const { 8522 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 8523 switch (Op.getOpcode()) { 8524 default: break; 8525 case ARMISD::CMOV: { 8526 // Bits are known zero/one if known on the LHS and RHS. 
8527 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 8528 if (KnownZero == 0 && KnownOne == 0) return; 8529 8530 APInt KnownZeroRHS, KnownOneRHS; 8531 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 8532 KnownZeroRHS, KnownOneRHS, Depth+1); 8533 KnownZero &= KnownZeroRHS; 8534 KnownOne &= KnownOneRHS; 8535 return; 8536 } 8537 } 8538} 8539 8540//===----------------------------------------------------------------------===// 8541// ARM Inline Assembly Support 8542//===----------------------------------------------------------------------===// 8543 8544bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 8545 // Looking for "rev" which is V6+. 8546 if (!Subtarget->hasV6Ops()) 8547 return false; 8548 8549 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 8550 std::string AsmStr = IA->getAsmString(); 8551 SmallVector<StringRef, 4> AsmPieces; 8552 SplitString(AsmStr, AsmPieces, ";\n"); 8553 8554 switch (AsmPieces.size()) { 8555 default: return false; 8556 case 1: 8557 AsmStr = AsmPieces[0]; 8558 AsmPieces.clear(); 8559 SplitString(AsmStr, AsmPieces, " \t,"); 8560 8561 // rev $0, $1 8562 if (AsmPieces.size() == 3 && 8563 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 8564 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 8565 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 8566 if (Ty && Ty->getBitWidth() == 32) 8567 return IntrinsicLowering::LowerToByteSwap(CI); 8568 } 8569 break; 8570 } 8571 8572 return false; 8573} 8574 8575/// getConstraintType - Given a constraint letter, return the type of 8576/// constraint it is for this target. 8577ARMTargetLowering::ConstraintType 8578ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 8579 if (Constraint.size() == 1) { 8580 switch (Constraint[0]) { 8581 default: break; 8582 case 'l': return C_RegisterClass; 8583 case 'w': return C_RegisterClass; 8584 case 'h': return C_RegisterClass; 8585 case 'x': return C_RegisterClass; 8586 case 't': return C_RegisterClass; 8587 case 'j': return C_Other; // Constant for movw. 8588 // An address with a single base register. Due to the way we 8589 // currently handle addresses it is the same as an 'r' memory constraint. 8590 case 'Q': return C_Memory; 8591 } 8592 } else if (Constraint.size() == 2) { 8593 switch (Constraint[0]) { 8594 default: break; 8595 // All 'U+' constraints are addresses. 8596 case 'U': return C_Memory; 8597 } 8598 } 8599 return TargetLowering::getConstraintType(Constraint); 8600} 8601 8602/// Examine constraint type and operand type and determine a weight value. 8603/// This object must already have been set up with the operand type 8604/// and the current alternative constraint selected. 8605TargetLowering::ConstraintWeight 8606ARMTargetLowering::getSingleConstraintMatchWeight( 8607 AsmOperandInfo &info, const char *constraint) const { 8608 ConstraintWeight weight = CW_Invalid; 8609 Value *CallOperandVal = info.CallOperandVal; 8610 // If we don't have a value, we can't do a match, 8611 // but allow it at the lowest weight. 8612 if (CallOperandVal == NULL) 8613 return CW_Default; 8614 Type *type = CallOperandVal->getType(); 8615 // Look at the constraint type. 
8616 switch (*constraint) { 8617 default: 8618 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 8619 break; 8620 case 'l': 8621 if (type->isIntegerTy()) { 8622 if (Subtarget->isThumb()) 8623 weight = CW_SpecificReg; 8624 else 8625 weight = CW_Register; 8626 } 8627 break; 8628 case 'w': 8629 if (type->isFloatingPointTy()) 8630 weight = CW_Register; 8631 break; 8632 } 8633 return weight; 8634} 8635 8636typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 8637RCPair 8638ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 8639 EVT VT) const { 8640 if (Constraint.size() == 1) { 8641 // GCC ARM Constraint Letters 8642 switch (Constraint[0]) { 8643 case 'l': // Low regs or general regs. 8644 if (Subtarget->isThumb()) 8645 return RCPair(0U, ARM::tGPRRegisterClass); 8646 else 8647 return RCPair(0U, ARM::GPRRegisterClass); 8648 case 'h': // High regs or no regs. 8649 if (Subtarget->isThumb()) 8650 return RCPair(0U, ARM::hGPRRegisterClass); 8651 break; 8652 case 'r': 8653 return RCPair(0U, ARM::GPRRegisterClass); 8654 case 'w': 8655 if (VT == MVT::f32) 8656 return RCPair(0U, ARM::SPRRegisterClass); 8657 if (VT.getSizeInBits() == 64) 8658 return RCPair(0U, ARM::DPRRegisterClass); 8659 if (VT.getSizeInBits() == 128) 8660 return RCPair(0U, ARM::QPRRegisterClass); 8661 break; 8662 case 'x': 8663 if (VT == MVT::f32) 8664 return RCPair(0U, ARM::SPR_8RegisterClass); 8665 if (VT.getSizeInBits() == 64) 8666 return RCPair(0U, ARM::DPR_8RegisterClass); 8667 if (VT.getSizeInBits() == 128) 8668 return RCPair(0U, ARM::QPR_8RegisterClass); 8669 break; 8670 case 't': 8671 if (VT == MVT::f32) 8672 return RCPair(0U, ARM::SPRRegisterClass); 8673 break; 8674 } 8675 } 8676 if (StringRef("{cc}").equals_lower(Constraint)) 8677 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 8678 8679 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 8680} 8681 8682/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 8683/// vector. If it is invalid, don't add anything to Ops. 8684void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 8685 std::string &Constraint, 8686 std::vector<SDValue>&Ops, 8687 SelectionDAG &DAG) const { 8688 SDValue Result(0, 0); 8689 8690 // Currently only support length 1 constraints. 8691 if (Constraint.length() != 1) return; 8692 8693 char ConstraintLetter = Constraint[0]; 8694 switch (ConstraintLetter) { 8695 default: break; 8696 case 'j': 8697 case 'I': case 'J': case 'K': case 'L': 8698 case 'M': case 'N': case 'O': 8699 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 8700 if (!C) 8701 return; 8702 8703 int64_t CVal64 = C->getSExtValue(); 8704 int CVal = (int) CVal64; 8705 // None of these constraints allow values larger than 32 bits. Check 8706 // that the value fits in an int. 8707 if (CVal != CVal64) 8708 return; 8709 8710 switch (ConstraintLetter) { 8711 case 'j': 8712 // Constant suitable for movw, must be between 0 and 8713 // 65535. 8714 if (Subtarget->hasV6T2Ops()) 8715 if (CVal >= 0 && CVal <= 65535) 8716 break; 8717 return; 8718 case 'I': 8719 if (Subtarget->isThumb1Only()) { 8720 // This must be a constant between 0 and 255, for ADD 8721 // immediates. 8722 if (CVal >= 0 && CVal <= 255) 8723 break; 8724 } else if (Subtarget->isThumb2()) { 8725 // A constant that can be used as an immediate value in a 8726 // data-processing instruction. 
8727 if (ARM_AM::getT2SOImmVal(CVal) != -1) 8728 break; 8729 } else { 8730 // A constant that can be used as an immediate value in a 8731 // data-processing instruction. 8732 if (ARM_AM::getSOImmVal(CVal) != -1) 8733 break; 8734 } 8735 return; 8736 8737 case 'J': 8738 if (Subtarget->isThumb()) { // FIXME thumb2 8739 // This must be a constant between -255 and -1, for negated ADD 8740 // immediates. This can be used in GCC with an "n" modifier that 8741 // prints the negated value, for use with SUB instructions. It is 8742 // not useful otherwise but is implemented for compatibility. 8743 if (CVal >= -255 && CVal <= -1) 8744 break; 8745 } else { 8746 // This must be a constant between -4095 and 4095. It is not clear 8747 // what this constraint is intended for. Implemented for 8748 // compatibility with GCC. 8749 if (CVal >= -4095 && CVal <= 4095) 8750 break; 8751 } 8752 return; 8753 8754 case 'K': 8755 if (Subtarget->isThumb1Only()) { 8756 // A 32-bit value where only one byte has a nonzero value. Exclude 8757 // zero to match GCC. This constraint is used by GCC internally for 8758 // constants that can be loaded with a move/shift combination. 8759 // It is not useful otherwise but is implemented for compatibility. 8760 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 8761 break; 8762 } else if (Subtarget->isThumb2()) { 8763 // A constant whose bitwise inverse can be used as an immediate 8764 // value in a data-processing instruction. This can be used in GCC 8765 // with a "B" modifier that prints the inverted value, for use with 8766 // BIC and MVN instructions. It is not useful otherwise but is 8767 // implemented for compatibility. 8768 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 8769 break; 8770 } else { 8771 // A constant whose bitwise inverse can be used as an immediate 8772 // value in a data-processing instruction. This can be used in GCC 8773 // with a "B" modifier that prints the inverted value, for use with 8774 // BIC and MVN instructions. It is not useful otherwise but is 8775 // implemented for compatibility. 8776 if (ARM_AM::getSOImmVal(~CVal) != -1) 8777 break; 8778 } 8779 return; 8780 8781 case 'L': 8782 if (Subtarget->isThumb1Only()) { 8783 // This must be a constant between -7 and 7, 8784 // for 3-operand ADD/SUB immediate instructions. 8785 if (CVal >= -7 && CVal < 7) 8786 break; 8787 } else if (Subtarget->isThumb2()) { 8788 // A constant whose negation can be used as an immediate value in a 8789 // data-processing instruction. This can be used in GCC with an "n" 8790 // modifier that prints the negated value, for use with SUB 8791 // instructions. It is not useful otherwise but is implemented for 8792 // compatibility. 8793 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 8794 break; 8795 } else { 8796 // A constant whose negation can be used as an immediate value in a 8797 // data-processing instruction. This can be used in GCC with an "n" 8798 // modifier that prints the negated value, for use with SUB 8799 // instructions. It is not useful otherwise but is implemented for 8800 // compatibility. 8801 if (ARM_AM::getSOImmVal(-CVal) != -1) 8802 break; 8803 } 8804 return; 8805 8806 case 'M': 8807 if (Subtarget->isThumb()) { // FIXME thumb2 8808 // This must be a multiple of 4 between 0 and 1020, for 8809 // ADD sp + immediate. 8810 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 8811 break; 8812 } else { 8813 // A power of two or a constant between 0 and 32. 
This is used in 8814 // GCC for the shift amount on shifted register operands, but it is 8815 // useful in general for any shift amounts. 8816 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 8817 break; 8818 } 8819 return; 8820 8821 case 'N': 8822 if (Subtarget->isThumb()) { // FIXME thumb2 8823 // This must be a constant between 0 and 31, for shift amounts. 8824 if (CVal >= 0 && CVal <= 31) 8825 break; 8826 } 8827 return; 8828 8829 case 'O': 8830 if (Subtarget->isThumb()) { // FIXME thumb2 8831 // This must be a multiple of 4 between -508 and 508, for 8832 // ADD/SUB sp = sp + immediate. 8833 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 8834 break; 8835 } 8836 return; 8837 } 8838 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 8839 break; 8840 } 8841 8842 if (Result.getNode()) { 8843 Ops.push_back(Result); 8844 return; 8845 } 8846 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 8847} 8848 8849bool 8850ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 8851 // The ARM target isn't yet aware of offsets. 8852 return false; 8853} 8854 8855bool ARM::isBitFieldInvertedMask(unsigned v) { 8856 if (v == 0xffffffff) 8857 return 0; 8858 // there can be 1's on either or both "outsides", all the "inside" 8859 // bits must be 0's 8860 unsigned int lsb = 0, msb = 31; 8861 while (v & (1 << msb)) --msb; 8862 while (v & (1 << lsb)) ++lsb; 8863 for (unsigned int i = lsb; i <= msb; ++i) { 8864 if (v & (1 << i)) 8865 return 0; 8866 } 8867 return 1; 8868} 8869 8870/// isFPImmLegal - Returns true if the target can instruction select the 8871/// specified FP immediate natively. If false, the legalizer will 8872/// materialize the FP immediate as a load from a constant pool. 8873bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 8874 if (!Subtarget->hasVFP3()) 8875 return false; 8876 if (VT == MVT::f32) 8877 return ARM_AM::getFP32Imm(Imm) != -1; 8878 if (VT == MVT::f64) 8879 return ARM_AM::getFP64Imm(Imm) != -1; 8880 return false; 8881} 8882 8883/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 8884/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 8885/// specified in the intrinsic calls. 8886bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 8887 const CallInst &I, 8888 unsigned Intrinsic) const { 8889 switch (Intrinsic) { 8890 case Intrinsic::arm_neon_vld1: 8891 case Intrinsic::arm_neon_vld2: 8892 case Intrinsic::arm_neon_vld3: 8893 case Intrinsic::arm_neon_vld4: 8894 case Intrinsic::arm_neon_vld2lane: 8895 case Intrinsic::arm_neon_vld3lane: 8896 case Intrinsic::arm_neon_vld4lane: { 8897 Info.opc = ISD::INTRINSIC_W_CHAIN; 8898 // Conservatively set memVT to the entire set of vectors loaded. 
8899 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 8900 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8901 Info.ptrVal = I.getArgOperand(0); 8902 Info.offset = 0; 8903 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8904 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8905 Info.vol = false; // volatile loads with NEON intrinsics not supported 8906 Info.readMem = true; 8907 Info.writeMem = false; 8908 return true; 8909 } 8910 case Intrinsic::arm_neon_vst1: 8911 case Intrinsic::arm_neon_vst2: 8912 case Intrinsic::arm_neon_vst3: 8913 case Intrinsic::arm_neon_vst4: 8914 case Intrinsic::arm_neon_vst2lane: 8915 case Intrinsic::arm_neon_vst3lane: 8916 case Intrinsic::arm_neon_vst4lane: { 8917 Info.opc = ISD::INTRINSIC_VOID; 8918 // Conservatively set memVT to the entire set of vectors stored. 8919 unsigned NumElts = 0; 8920 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 8921 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 8922 if (!ArgTy->isVectorTy()) 8923 break; 8924 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 8925 } 8926 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 8927 Info.ptrVal = I.getArgOperand(0); 8928 Info.offset = 0; 8929 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 8930 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 8931 Info.vol = false; // volatile stores with NEON intrinsics not supported 8932 Info.readMem = false; 8933 Info.writeMem = true; 8934 return true; 8935 } 8936 case Intrinsic::arm_strexd: { 8937 Info.opc = ISD::INTRINSIC_W_CHAIN; 8938 Info.memVT = MVT::i64; 8939 Info.ptrVal = I.getArgOperand(2); 8940 Info.offset = 0; 8941 Info.align = 8; 8942 Info.vol = true; 8943 Info.readMem = false; 8944 Info.writeMem = true; 8945 return true; 8946 } 8947 case Intrinsic::arm_ldrexd: { 8948 Info.opc = ISD::INTRINSIC_W_CHAIN; 8949 Info.memVT = MVT::i64; 8950 Info.ptrVal = I.getArgOperand(0); 8951 Info.offset = 0; 8952 Info.align = 8; 8953 Info.vol = true; 8954 Info.readMem = true; 8955 Info.writeMem = false; 8956 return true; 8957 } 8958 default: 8959 break; 8960 } 8961 8962 return false; 8963} 8964
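
// A worked illustration of the transformation CombineBaseUpdate enables,
// assuming a single d-register vld1 whose base pointer is then advanced by
// exactly the 8 bytes accessed (register numbers are only examples):
//
//     vld1.32 {d16}, [r0]
//     add     r0, r0, #8
//
// can be selected as the post-incrementing (base-writeback) form:
//
//     vld1.32 {d16}, [r0]!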
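
// A minimal standalone sketch of the scalar property that isConstVecPow2
// checks for every element of the constant vector feeding PerformVCVTCombine
// and PerformVDIVCombine: the multiplier (or divisor) must be an exact power
// of two whose log2 lies between 1 and 32, and that log2 becomes the #imm of
// the fixed-point VCVT (e.g. 8.0 -> #3, as in the comments above). The helper
// name below is hypothetical and is not part of the lowering code.
#include <cmath>

static bool getFixedPointScale(double C, unsigned &Imm) {
  if (C <= 0.0)
    return false;
  int Exp;
  double Mant = std::frexp(C, &Exp); // C == Mant * 2^Exp with Mant in [0.5, 1)
  if (Mant != 0.5)                   // reject anything that is not exactly 2^k
    return false;
  int Log2C = Exp - 1;               // C == 2^Log2C
  if (Log2C < 1 || Log2C > 32)       // same range isConstVecPow2 enforces
    return false;
  Imm = (unsigned)Log2C;             // number of fraction bits for the vcvt
  return true;
}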
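
// A minimal standalone sketch of the mask shape ARM::isBitFieldInvertedMask
// accepts, restated with a different bit trick: the mask must be all ones
// except for a single contiguous run of zero bits (the field a bit-field
// clear would zero), so its complement must be one contiguous run of ones.
// The helper name is hypothetical.
#include <cstdint>

static bool isInvertedBitFieldMask(uint32_t V) {
  if (V == 0xffffffffu)                    // need at least one zero bit
    return false;
  uint32_t Zeros = ~V;                     // the bits forming the cleared field
  uint32_t Low = Zeros & ~(Zeros - 1u);    // lowest set bit of that run
  // Adding the lowest bit to a contiguous run of ones clears the entire run;
  // any bits still overlapping Zeros mean the zero bits were not contiguous.
  return ((Zeros + Low) & Zeros) == 0;
}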