ARMISelLowering.cpp revision f74a4298163a7d0b500c7f7a818829c153dc942e
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// This option should go away when tail calls fully work.
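// Tail calls default to off; they must be requested explicitly with the
// -arm-tail-calls flag defined below.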
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
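  // Marking them Expand lets the legalizer scalarize them (and fall back to
  // libcalls where the scalar operation itself is not available).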
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64, "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32,
"__aeabi_fdiv"); 280 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 281 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 282 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 283 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 284 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 285 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 286 287 // Single-precision floating-point comparison helper functions 288 // RTABI chapter 4.1.2, Table 5 289 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 290 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 291 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 292 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 293 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 294 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 295 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 296 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 297 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 298 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 299 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 300 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 301 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 302 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 303 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 304 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 305 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 306 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 307 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 308 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 309 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 310 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 311 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 313 314 // Floating-point to integer conversions. 315 // RTABI chapter 4.1.2, Table 6 316 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 317 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 318 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 319 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 320 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 321 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 322 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 323 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 324 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 325 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 326 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 327 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 328 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 329 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 330 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 331 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 332 333 // Conversions between floating types. 334 // RTABI chapter 4.1.2, Table 7 335 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 336 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 337 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 339 340 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops())
    setTargetDAGCombine(ISD::OR);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";
  case ARMISD::CNEG: return "ARMISD::CNEG";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP: return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return
"ARMISD::VSHL"; 779 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 780 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 781 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 782 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 783 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 784 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 785 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 786 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 787 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 788 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 789 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 790 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 791 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 792 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 793 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 794 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 795 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 796 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 797 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 798 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 799 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 800 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 801 case ARMISD::VDUP: return "ARMISD::VDUP"; 802 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 803 case ARMISD::VEXT: return "ARMISD::VEXT"; 804 case ARMISD::VREV64: return "ARMISD::VREV64"; 805 case ARMISD::VREV32: return "ARMISD::VREV32"; 806 case ARMISD::VREV16: return "ARMISD::VREV16"; 807 case ARMISD::VZIP: return "ARMISD::VZIP"; 808 case ARMISD::VUZP: return "ARMISD::VUZP"; 809 case ARMISD::VTRN: return "ARMISD::VTRN"; 810 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 811 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 812 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 813 case ARMISD::FMAX: return "ARMISD::FMAX"; 814 case ARMISD::FMIN: return "ARMISD::FMIN"; 815 case ARMISD::BFI: return "ARMISD::BFI"; 816 } 817} 818 819/// getRegClassFor - Return the register class that should be used for the 820/// specified value type. 821TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 822 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 823 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 824 // load / store 4 to 8 consecutive D registers. 825 if (Subtarget->hasNEON()) { 826 if (VT == MVT::v4i64) 827 return ARM::QQPRRegisterClass; 828 else if (VT == MVT::v8i64) 829 return ARM::QQQQPRRegisterClass; 830 } 831 return TargetLowering::getRegClassFor(VT); 832} 833 834// Create a fast isel object. 835FastISel * 836ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 837 return ARM::createFastISel(funcInfo); 838} 839 840/// getFunctionAlignment - Return the Log2 alignment of this function. 841unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const { 842 return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2; 843} 844 845/// getMaximalGlobalOffset - Returns the maximal possible offset which can 846/// be used for loads / stores from the global. 847unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 848 return (Subtarget->isThumb1Only() ? 
          127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return RegInfo->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = RegInfo->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
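/// Some unordered comparisons need a second ARM condition; CondCode2 is left
/// as ARMCC::AL when a single condition suffices.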
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO: CondCode = ARMCC::VC; break;
  case ISD::SETUO: CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
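  // f64 results (and each f64 half of a v2f64) arrive as a pair of i32
  // registers and are reassembled below with ARMISD::VMOVDRR.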
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
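/// Byval arguments are copied to their stack slot with a memcpy node rather
/// than a plain store.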
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
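    // Pre-v5T Thumb has no immediate-form BLX, so the callee address is loaded
    // from the constant pool and the call goes through a register.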
1327 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1328 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1329 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1330 ARMPCLabelIndex, 1331 ARMCP::CPValue, 4); 1332 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1333 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1334 Callee = DAG.getLoad(getPointerTy(), dl, 1335 DAG.getEntryNode(), CPAddr, 1336 MachinePointerInfo::getConstantPool(), 1337 false, false, 0); 1338 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1339 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1340 getPointerTy(), Callee, PICLabel); 1341 } else { 1342 // On ELF targets for PIC code, direct calls should go through the PLT 1343 unsigned OpFlags = 0; 1344 if (Subtarget->isTargetELF() && 1345 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1346 OpFlags = ARMII::MO_PLT; 1347 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1348 } 1349 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1350 isDirect = true; 1351 bool isStub = Subtarget->isTargetDarwin() && 1352 getTargetMachine().getRelocationModel() != Reloc::Static; 1353 isARMFunc = !Subtarget->isThumb() || isStub; 1354 // tBX takes a register source operand. 1355 const char *Sym = S->getSymbol(); 1356 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1357 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1358 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1359 Sym, ARMPCLabelIndex, 4); 1360 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1361 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1362 Callee = DAG.getLoad(getPointerTy(), dl, 1363 DAG.getEntryNode(), CPAddr, 1364 MachinePointerInfo::getConstantPool(), 1365 false, false, 0); 1366 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1367 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1368 getPointerTy(), Callee, PICLabel); 1369 } else { 1370 unsigned OpFlags = 0; 1371 // On ELF targets for PIC code, direct calls should go through the PLT 1372 if (Subtarget->isTargetELF() && 1373 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1374 OpFlags = ARMII::MO_PLT; 1375 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1376 } 1377 } 1378 1379 // FIXME: handle tail calls differently. 1380 unsigned CallOpc; 1381 if (Subtarget->isThumb()) { 1382 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1383 CallOpc = ARMISD::CALL_NOLINK; 1384 else 1385 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1386 } else { 1387 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1388 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1389 : ARMISD::CALL_NOLINK; 1390 } 1391 1392 std::vector<SDValue> Ops; 1393 Ops.push_back(Chain); 1394 Ops.push_back(Callee); 1395 1396 // Add argument registers to the end of the list so that they are known live 1397 // into the call. 1398 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1399 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1400 RegsToPass[i].second.getValueType())); 1401 1402 if (InFlag.getNode()) 1403 Ops.push_back(InFlag); 1404 1405 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1406 if (isTailCall) 1407 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1408 1409 // Returns a chain and a flag for retval copy to use. 
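// The call node is glued to the preceding argument copies via InFlag, and the
// CALLSEQ_END below closes the CALLSEQ_START issued when the outgoing
// argument area was reserved.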
1410 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1411 InFlag = Chain.getValue(1); 1412 1413 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1414 DAG.getIntPtrConstant(0, true), InFlag); 1415 if (!Ins.empty()) 1416 InFlag = Chain.getValue(1); 1417 1418 // Handle result values, copying them out of physregs into vregs that we 1419 // return. 1420 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1421 dl, DAG, InVals); 1422} 1423 1424/// MatchingStackOffset - Return true if the given stack call argument is 1425/// already available in the same position (relatively) of the caller's 1426/// incoming argument stack. 1427static 1428bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1429 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1430 const ARMInstrInfo *TII) { 1431 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1432 int FI = INT_MAX; 1433 if (Arg.getOpcode() == ISD::CopyFromReg) { 1434 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1435 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 1436 return false; 1437 MachineInstr *Def = MRI->getVRegDef(VR); 1438 if (!Def) 1439 return false; 1440 if (!Flags.isByVal()) { 1441 if (!TII->isLoadFromStackSlot(Def, FI)) 1442 return false; 1443 } else { 1444 return false; 1445 } 1446 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1447 if (Flags.isByVal()) 1448 // ByVal argument is passed in as a pointer but it's now being 1449 // dereferenced. e.g. 1450 // define @foo(%struct.X* %A) { 1451 // tail call @bar(%struct.X* byval %A) 1452 // } 1453 return false; 1454 SDValue Ptr = Ld->getBasePtr(); 1455 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1456 if (!FINode) 1457 return false; 1458 FI = FINode->getIndex(); 1459 } else 1460 return false; 1461 1462 assert(FI != INT_MAX); 1463 if (!MFI->isFixedObjectIndex(FI)) 1464 return false; 1465 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1466} 1467 1468/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1469/// for tail call optimization. Targets which want to do tail call 1470/// optimization should implement this function. 1471bool 1472ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1473 CallingConv::ID CalleeCC, 1474 bool isVarArg, 1475 bool isCalleeStructRet, 1476 bool isCallerStructRet, 1477 const SmallVectorImpl<ISD::OutputArg> &Outs, 1478 const SmallVectorImpl<SDValue> &OutVals, 1479 const SmallVectorImpl<ISD::InputArg> &Ins, 1480 SelectionDAG& DAG) const { 1481 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1482 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1483 bool CCMatch = CallerCC == CalleeCC; 1484 1485 // Look for obvious safe cases to perform tail call optimization that do not 1486 // require ABI changes. This is what gcc calls sibcall. 1487 1488 // Do not sibcall optimize vararg calls unless the call site is not passing 1489 // any arguments. 1490 if (isVarArg && !Outs.empty()) 1491 return false; 1492 1493 // Also avoid sibcall optimization if either caller or callee uses struct 1494 // return semantics. 1495 if (isCalleeStructRet || isCallerStructRet) 1496 return false; 1497 1498 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1499 // emitEpilogue is not ready for them. 1500 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1501 // LR. 
This means if we need to reload LR, it takes an extra instruction, 1502 // which outweighs the value of the tail call; but here we don't know yet 1503 // whether LR is going to be used. Probably the right approach is to 1504 // generate the tail call here and turn it back into CALL/RET in 1505 // emitEpilogue if LR is used. 1506 if (Subtarget->isThumb1Only()) 1507 return false; 1508 1509 // For the moment, we can only do this to functions defined in this 1510 // compilation, or to indirect calls. A Thumb B to an ARM function, 1511 // or vice versa, is not easily fixed up in the linker unlike BL. 1512 // (We could do this by loading the address of the callee into a register; 1513 // that is an extra instruction over the direct call and burns a register 1514 // as well, so is not likely to be a win.) 1515 1516 // It might be safe to remove this restriction on non-Darwin. 1517 1518 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1519 // but we need to make sure there are enough registers; the only valid 1520 // registers are the 4 used for parameters. We don't currently do this 1521 // case. 1522 if (isa<ExternalSymbolSDNode>(Callee)) 1523 return false; 1524 1525 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1526 const GlobalValue *GV = G->getGlobal(); 1527 if (GV->isDeclaration() || GV->isWeakForLinker()) 1528 return false; 1529 } 1530 1531 // If the calling conventions do not match, then we'd better make sure the 1532 // results are returned in the same way as the caller expects. 1533 if (!CCMatch) { 1534 SmallVector<CCValAssign, 16> RVLocs1; 1535 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 1536 RVLocs1, *DAG.getContext()); 1537 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1538 1539 SmallVector<CCValAssign, 16> RVLocs2; 1540 CCState CCInfo2(CallerCC, false, getTargetMachine(), 1541 RVLocs2, *DAG.getContext()); 1542 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1543 1544 if (RVLocs1.size() != RVLocs2.size()) 1545 return false; 1546 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1547 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1548 return false; 1549 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1550 return false; 1551 if (RVLocs1[i].isRegLoc()) { 1552 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1553 return false; 1554 } else { 1555 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1556 return false; 1557 } 1558 } 1559 } 1560 1561 // If the callee takes no arguments then go on to check the results of the 1562 // call. 1563 if (!Outs.empty()) { 1564 // Check if stack adjustment is needed. For now, do not do this if any 1565 // argument is passed on the stack. 1566 SmallVector<CCValAssign, 16> ArgLocs; 1567 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 1568 ArgLocs, *DAG.getContext()); 1569 CCInfo.AnalyzeCallOperands(Outs, 1570 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1571 if (CCInfo.getNextStackOffset()) { 1572 MachineFunction &MF = DAG.getMachineFunction(); 1573 1574 // Check if the arguments are already laid out in the right way as 1575 // the caller's fixed stack objects.
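// (MatchingStackOffset compares each outgoing stack argument against the
// offset and size of the caller's corresponding fixed frame object.)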
1576 MachineFrameInfo *MFI = MF.getFrameInfo(); 1577 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1578 const ARMInstrInfo *TII = 1579 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1580 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1581 i != e; 1582 ++i, ++realArgIdx) { 1583 CCValAssign &VA = ArgLocs[i]; 1584 EVT RegVT = VA.getLocVT(); 1585 SDValue Arg = OutVals[realArgIdx]; 1586 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1587 if (VA.getLocInfo() == CCValAssign::Indirect) 1588 return false; 1589 if (VA.needsCustom()) { 1590 // f64 and vector types are split into multiple registers or 1591 // register/stack-slot combinations. The types will not match 1592 // the registers; give up on memory f64 refs until we figure 1593 // out what to do about this. 1594 if (!VA.isRegLoc()) 1595 return false; 1596 if (!ArgLocs[++i].isRegLoc()) 1597 return false; 1598 if (RegVT == MVT::v2f64) { 1599 if (!ArgLocs[++i].isRegLoc()) 1600 return false; 1601 if (!ArgLocs[++i].isRegLoc()) 1602 return false; 1603 } 1604 } else if (!VA.isRegLoc()) { 1605 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1606 MFI, MRI, TII)) 1607 return false; 1608 } 1609 } 1610 } 1611 } 1612 1613 return true; 1614} 1615 1616SDValue 1617ARMTargetLowering::LowerReturn(SDValue Chain, 1618 CallingConv::ID CallConv, bool isVarArg, 1619 const SmallVectorImpl<ISD::OutputArg> &Outs, 1620 const SmallVectorImpl<SDValue> &OutVals, 1621 DebugLoc dl, SelectionDAG &DAG) const { 1622 1623 // CCValAssign - represent the assignment of the return value to a location. 1624 SmallVector<CCValAssign, 16> RVLocs; 1625 1626 // CCState - Info about the registers and stack slots. 1627 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1628 *DAG.getContext()); 1629 1630 // Analyze outgoing return values. 1631 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1632 isVarArg)); 1633 1634 // If this is the first return lowered for this function, add 1635 // the regs to the liveout set for the function. 1636 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1637 for (unsigned i = 0; i != RVLocs.size(); ++i) 1638 if (RVLocs[i].isRegLoc()) 1639 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1640 } 1641 1642 SDValue Flag; 1643 1644 // Copy the result values into the output registers. 1645 for (unsigned i = 0, realRVLocIdx = 0; 1646 i != RVLocs.size(); 1647 ++i, ++realRVLocIdx) { 1648 CCValAssign &VA = RVLocs[i]; 1649 assert(VA.isRegLoc() && "Can only return in registers!"); 1650 1651 SDValue Arg = OutVals[realRVLocIdx]; 1652 1653 switch (VA.getLocInfo()) { 1654 default: llvm_unreachable("Unknown loc info!"); 1655 case CCValAssign::Full: break; 1656 case CCValAssign::BCvt: 1657 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg); 1658 break; 1659 } 1660 1661 if (VA.needsCustom()) { 1662 if (VA.getLocVT() == MVT::v2f64) { 1663 // Extract the first half and return it in two registers. 
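// A v2f64 result is split into two f64 halves; each half is then moved to a
// pair of GPRs with VMOVRRD, so the full value occupies four return registers.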
1664 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1665 DAG.getConstant(0, MVT::i32)); 1666 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1667 DAG.getVTList(MVT::i32, MVT::i32), Half); 1668 1669 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1670 Flag = Chain.getValue(1); 1671 VA = RVLocs[++i]; // skip ahead to next loc 1672 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1673 HalfGPRs.getValue(1), Flag); 1674 Flag = Chain.getValue(1); 1675 VA = RVLocs[++i]; // skip ahead to next loc 1676 1677 // Extract the 2nd half and fall through to handle it as an f64 value. 1678 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1679 DAG.getConstant(1, MVT::i32)); 1680 } 1681 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1682 // available. 1683 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1684 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1685 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1686 Flag = Chain.getValue(1); 1687 VA = RVLocs[++i]; // skip ahead to next loc 1688 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1689 Flag); 1690 } else 1691 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1692 1693 // Guarantee that all emitted copies are 1694 // stuck together, avoiding something bad. 1695 Flag = Chain.getValue(1); 1696 } 1697 1698 SDValue result; 1699 if (Flag.getNode()) 1700 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1701 else // Return Void 1702 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1703 1704 return result; 1705} 1706 1707// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1708// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1709// one of the above mentioned nodes. It has to be wrapped because otherwise 1710// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1711// be used to form addressing mode. These wrapped nodes will be selected 1712// into MOVi. 1713static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1714 EVT PtrVT = Op.getValueType(); 1715 // FIXME there is no actual debug info here 1716 DebugLoc dl = Op.getDebugLoc(); 1717 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1718 SDValue Res; 1719 if (CP->isMachineConstantPoolEntry()) 1720 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1721 CP->getAlignment()); 1722 else 1723 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1724 CP->getAlignment()); 1725 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1726} 1727 1728unsigned ARMTargetLowering::getJumpTableEncoding() const { 1729 return MachineJumpTableInfo::EK_Inline; 1730} 1731 1732SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1733 SelectionDAG &DAG) const { 1734 MachineFunction &MF = DAG.getMachineFunction(); 1735 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1736 unsigned ARMPCLabelIndex = 0; 1737 DebugLoc DL = Op.getDebugLoc(); 1738 EVT PtrVT = getPointerTy(); 1739 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1740 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1741 SDValue CPAddr; 1742 if (RelocM == Reloc::Static) { 1743 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1744 } else { 1745 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1746 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1747 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1748 ARMCP::CPBlockAddress, 1749 PCAdj); 1750 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1751 } 1752 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1753 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1754 MachinePointerInfo::getConstantPool(), 1755 false, false, 0); 1756 if (RelocM == Reloc::Static) 1757 return Result; 1758 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1759 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1760} 1761 1762// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1763SDValue 1764ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1765 SelectionDAG &DAG) const { 1766 DebugLoc dl = GA->getDebugLoc(); 1767 EVT PtrVT = getPointerTy(); 1768 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1769 MachineFunction &MF = DAG.getMachineFunction(); 1770 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1771 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1772 ARMConstantPoolValue *CPV = 1773 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1774 ARMCP::CPValue, PCAdj, "tlsgd", true); 1775 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1776 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1777 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1778 MachinePointerInfo::getConstantPool(), 1779 false, false, 0); 1780 SDValue Chain = Argument.getValue(1); 1781 1782 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1783 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1784 1785 // call __tls_get_addr. 1786 ArgListTy Args; 1787 ArgListEntry Entry; 1788 Entry.Node = Argument; 1789 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1790 Args.push_back(Entry); 1791 // FIXME: is there useful debug info available here? 1792 std::pair<SDValue, SDValue> CallResult = 1793 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1794 false, false, false, false, 1795 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1796 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1797 return CallResult.first; 1798} 1799 1800// Lower ISD::GlobalTLSAddress using the "initial exec" or 1801// "local exec" model. 1802SDValue 1803ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1804 SelectionDAG &DAG) const { 1805 const GlobalValue *GV = GA->getGlobal(); 1806 DebugLoc dl = GA->getDebugLoc(); 1807 SDValue Offset; 1808 SDValue Chain = DAG.getEntryNode(); 1809 EVT PtrVT = getPointerTy(); 1810 // Get the Thread Pointer 1811 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1812 1813 if (GV->isDeclaration()) { 1814 MachineFunction &MF = DAG.getMachineFunction(); 1815 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1816 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1817 // Initial exec model. 1818 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 1819 ARMConstantPoolValue *CPV = 1820 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1821 ARMCP::CPValue, PCAdj, "gottpoff", true); 1822 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1823 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1824 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1825 MachinePointerInfo::getConstantPool(), 1826 false, false, 0); 1827 Chain = Offset.getValue(1); 1828 1829 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1830 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1831 1832 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1833 MachinePointerInfo::getConstantPool(), 1834 false, false, 0); 1835 } else { 1836 // local exec model 1837 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff"); 1838 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1839 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1840 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1841 MachinePointerInfo::getConstantPool(), 1842 false, false, 0); 1843 } 1844 1845 // The address of the thread local variable is the add of the thread 1846 // pointer with the offset of the variable. 1847 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1848} 1849 1850SDValue 1851ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1852 // TODO: implement the "local dynamic" model 1853 assert(Subtarget->isTargetELF() && 1854 "TLS not implemented for non-ELF targets"); 1855 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1856 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1857 // otherwise use the "Local Exec" TLS Model 1858 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1859 return LowerToTLSGeneralDynamicModel(GA, DAG); 1860 else 1861 return LowerToTLSExecModels(GA, DAG); 1862} 1863 1864SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1865 SelectionDAG &DAG) const { 1866 EVT PtrVT = getPointerTy(); 1867 DebugLoc dl = Op.getDebugLoc(); 1868 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1869 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1870 if (RelocM == Reloc::PIC_) { 1871 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1872 ARMConstantPoolValue *CPV = 1873 new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT"); 1874 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1875 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1876 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1877 CPAddr, 1878 MachinePointerInfo::getConstantPool(), 1879 false, false, 0); 1880 SDValue Chain = Result.getValue(1); 1881 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1882 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1883 if (!UseGOTOFF) 1884 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1885 MachinePointerInfo::getGOT(), false, false, 0); 1886 return Result; 1887 } else { 1888 // If we have T2 ops, we can materialize the address directly via movt/movw 1889 // pair. This is always cheaper. 
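// Roughly, the wrapped TargetGlobalAddress is later selected to a pair such
// as (sketch; exact syntax depends on the assembler):
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym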
1890 if (Subtarget->useMovt()) { 1891 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1892 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1893 } else { 1894 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1895 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1896 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1897 MachinePointerInfo::getConstantPool(), 1898 false, false, 0); 1899 } 1900 } 1901} 1902 1903SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1904 SelectionDAG &DAG) const { 1905 MachineFunction &MF = DAG.getMachineFunction(); 1906 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1907 unsigned ARMPCLabelIndex = 0; 1908 EVT PtrVT = getPointerTy(); 1909 DebugLoc dl = Op.getDebugLoc(); 1910 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1911 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1912 SDValue CPAddr; 1913 if (RelocM == Reloc::Static) 1914 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1915 else { 1916 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1917 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1918 ARMConstantPoolValue *CPV = 1919 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1920 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1921 } 1922 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1923 1924 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1925 MachinePointerInfo::getConstantPool(), 1926 false, false, 0); 1927 SDValue Chain = Result.getValue(1); 1928 1929 if (RelocM == Reloc::PIC_) { 1930 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1931 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1932 } 1933 1934 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1935 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 1936 false, false, 0); 1937 1938 return Result; 1939} 1940 1941SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 1942 SelectionDAG &DAG) const { 1943 assert(Subtarget->isTargetELF() && 1944 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 1945 MachineFunction &MF = DAG.getMachineFunction(); 1946 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1947 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1948 EVT PtrVT = getPointerTy(); 1949 DebugLoc dl = Op.getDebugLoc(); 1950 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1951 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1952 "_GLOBAL_OFFSET_TABLE_", 1953 ARMPCLabelIndex, PCAdj); 1954 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1955 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1956 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1957 MachinePointerInfo::getConstantPool(), 1958 false, false, 0); 1959 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1960 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1961} 1962 1963SDValue 1964ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 1965 const { 1966 DebugLoc dl = Op.getDebugLoc(); 1967 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 1968 Op.getOperand(0), Op.getOperand(1)); 1969} 1970 1971SDValue 1972ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 1973 DebugLoc dl = Op.getDebugLoc(); 1974 SDValue Val = DAG.getConstant(0, MVT::i32); 1975 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 1976 Op.getOperand(1), Val); 1977} 1978 1979SDValue 1980ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 1981 DebugLoc dl = Op.getDebugLoc(); 1982 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 1983 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 1984} 1985 1986SDValue 1987ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 1988 const ARMSubtarget *Subtarget) const { 1989 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1990 DebugLoc dl = Op.getDebugLoc(); 1991 switch (IntNo) { 1992 default: return SDValue(); // Don't custom lower most intrinsics. 1993 case Intrinsic::arm_thread_pointer: { 1994 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1995 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1996 } 1997 case Intrinsic::eh_sjlj_lsda: { 1998 MachineFunction &MF = DAG.getMachineFunction(); 1999 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2000 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2001 EVT PtrVT = getPointerTy(); 2002 DebugLoc dl = Op.getDebugLoc(); 2003 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2004 SDValue CPAddr; 2005 unsigned PCAdj = (RelocM != Reloc::PIC_) 2006 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2007 ARMConstantPoolValue *CPV = 2008 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2009 ARMCP::CPLSDA, PCAdj); 2010 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2011 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2012 SDValue Result = 2013 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2014 MachinePointerInfo::getConstantPool(), 2015 false, false, 0); 2016 2017 if (RelocM == Reloc::PIC_) { 2018 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2019 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2020 } 2021 return Result; 2022 } 2023 } 2024} 2025 2026static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2027 const ARMSubtarget *Subtarget) { 2028 DebugLoc dl = Op.getDebugLoc(); 2029 if (!Subtarget->hasDataBarrier()) { 2030 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2031 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2032 // here. 2033 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb1Only() && 2034 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!"); 2035 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2036 DAG.getConstant(0, MVT::i32)); 2037 } 2038 2039 SDValue Op5 = Op.getOperand(5); 2040 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2041 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2042 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2043 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2044 2045 ARM_MB::MemBOpt DMBOpt; 2046 if (isDeviceBarrier) 2047 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2048 else 2049 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2050 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2051 DAG.getConstant(DMBOpt, MVT::i32)); 2052} 2053 2054static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2055 MachineFunction &MF = DAG.getMachineFunction(); 2056 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2057 2058 // vastart just stores the address of the VarArgsFrameIndex slot into the 2059 // memory location argument. 2060 DebugLoc dl = Op.getDebugLoc(); 2061 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2062 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2063 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2064 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2065 MachinePointerInfo(SV), false, false, 0); 2066} 2067 2068SDValue 2069ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2070 SDValue &Root, SelectionDAG &DAG, 2071 DebugLoc dl) const { 2072 MachineFunction &MF = DAG.getMachineFunction(); 2073 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2074 2075 TargetRegisterClass *RC; 2076 if (AFI->isThumb1OnlyFunction()) 2077 RC = ARM::tGPRRegisterClass; 2078 else 2079 RC = ARM::GPRRegisterClass; 2080 2081 // Transform the arguments stored in physical registers into virtual ones. 2082 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2083 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2084 2085 SDValue ArgValue2; 2086 if (NextVA.isMemLoc()) { 2087 MachineFrameInfo *MFI = MF.getFrameInfo(); 2088 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2089 2090 // Create load node to retrieve arguments from the stack. 2091 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2092 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2093 MachinePointerInfo::getFixedStack(FI), 2094 false, false, 0); 2095 } else { 2096 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2097 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2098 } 2099 2100 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2101} 2102 2103SDValue 2104ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2105 CallingConv::ID CallConv, bool isVarArg, 2106 const SmallVectorImpl<ISD::InputArg> 2107 &Ins, 2108 DebugLoc dl, SelectionDAG &DAG, 2109 SmallVectorImpl<SDValue> &InVals) 2110 const { 2111 2112 MachineFunction &MF = DAG.getMachineFunction(); 2113 MachineFrameInfo *MFI = MF.getFrameInfo(); 2114 2115 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2116 2117 // Assign locations to all of the incoming arguments. 
2118 SmallVector<CCValAssign, 16> ArgLocs; 2119 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2120 *DAG.getContext()); 2121 CCInfo.AnalyzeFormalArguments(Ins, 2122 CCAssignFnForNode(CallConv, /* Return*/ false, 2123 isVarArg)); 2124 2125 SmallVector<SDValue, 16> ArgValues; 2126 2127 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2128 CCValAssign &VA = ArgLocs[i]; 2129 2130 // Arguments stored in registers. 2131 if (VA.isRegLoc()) { 2132 EVT RegVT = VA.getLocVT(); 2133 2134 SDValue ArgValue; 2135 if (VA.needsCustom()) { 2136 // f64 and vector types are split up into multiple registers or 2137 // combinations of registers and stack slots. 2138 if (VA.getLocVT() == MVT::v2f64) { 2139 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2140 Chain, DAG, dl); 2141 VA = ArgLocs[++i]; // skip ahead to next loc 2142 SDValue ArgValue2; 2143 if (VA.isMemLoc()) { 2144 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2145 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2146 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2147 MachinePointerInfo::getFixedStack(FI), 2148 false, false, 0); 2149 } else { 2150 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2151 Chain, DAG, dl); 2152 } 2153 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2154 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2155 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2156 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2157 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2158 } else 2159 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2160 2161 } else { 2162 TargetRegisterClass *RC; 2163 2164 if (RegVT == MVT::f32) 2165 RC = ARM::SPRRegisterClass; 2166 else if (RegVT == MVT::f64) 2167 RC = ARM::DPRRegisterClass; 2168 else if (RegVT == MVT::v2f64) 2169 RC = ARM::QPRRegisterClass; 2170 else if (RegVT == MVT::i32) 2171 RC = (AFI->isThumb1OnlyFunction() ? 2172 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2173 else 2174 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2175 2176 // Transform the arguments in physical registers into virtual ones. 2177 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2178 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2179 } 2180 2181 // If this is an 8 or 16-bit value, it is really passed promoted 2182 // to 32 bits. Insert an assert[sz]ext to capture this, then 2183 // truncate to the right size. 
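// e.g. an i8 signext argument is roughly:
//   i8 = truncate (AssertSext i32 <CopyFromReg>, ValueType:i8)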
2184 switch (VA.getLocInfo()) { 2185 default: llvm_unreachable("Unknown loc info!"); 2186 case CCValAssign::Full: break; 2187 case CCValAssign::BCvt: 2188 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); 2189 break; 2190 case CCValAssign::SExt: 2191 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2192 DAG.getValueType(VA.getValVT())); 2193 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2194 break; 2195 case CCValAssign::ZExt: 2196 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2197 DAG.getValueType(VA.getValVT())); 2198 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2199 break; 2200 } 2201 2202 InVals.push_back(ArgValue); 2203 2204 } else { // VA.isRegLoc() 2205 2206 // sanity check 2207 assert(VA.isMemLoc()); 2208 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2209 2210 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; 2211 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true); 2212 2213 // Create load nodes to retrieve arguments from the stack. 2214 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2215 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2216 MachinePointerInfo::getFixedStack(FI), 2217 false, false, 0)); 2218 } 2219 } 2220 2221 // varargs 2222 if (isVarArg) { 2223 static const unsigned GPRArgRegs[] = { 2224 ARM::R0, ARM::R1, ARM::R2, ARM::R3 2225 }; 2226 2227 unsigned NumGPRs = CCInfo.getFirstUnallocated 2228 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2229 2230 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); 2231 unsigned VARegSize = (4 - NumGPRs) * 4; 2232 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2233 unsigned ArgOffset = CCInfo.getNextStackOffset(); 2234 if (VARegSaveSize) { 2235 // If this function is vararg, store any remaining integer argument regs 2236 // to their spots on the stack so that they may be loaded by dereferencing 2237 // the result of va_next. 2238 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2239 AFI->setVarArgsFrameIndex( 2240 MFI->CreateFixedObject(VARegSaveSize, 2241 ArgOffset + VARegSaveSize - VARegSize, 2242 false)); 2243 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2244 getPointerTy()); 2245 2246 SmallVector<SDValue, 4> MemOps; 2247 for (; NumGPRs < 4; ++NumGPRs) { 2248 TargetRegisterClass *RC; 2249 if (AFI->isThumb1OnlyFunction()) 2250 RC = ARM::tGPRRegisterClass; 2251 else 2252 RC = ARM::GPRRegisterClass; 2253 2254 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC); 2255 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2256 SDValue Store = 2257 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2258 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2259 false, false, 0); 2260 MemOps.push_back(Store); 2261 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2262 DAG.getConstant(4, getPointerTy())); 2263 } 2264 if (!MemOps.empty()) 2265 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2266 &MemOps[0], MemOps.size()); 2267 } else 2268 // This will point to the next argument passed via stack. 2269 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2270 } 2271 2272 return Chain; 2273} 2274 2275 /// isFloatingPointZero - Return true if this is +0.0.
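/// Also matches a (possibly extending) load of +0.0 from the constant pool,
/// which legalization may already have introduced.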
2276 static bool isFloatingPointZero(SDValue Op) { 2277 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2278 return CFP->getValueAPF().isPosZero(); 2279 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2280 // Maybe this has already been legalized into the constant pool? 2281 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2282 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2283 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2284 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2285 return CFP->getValueAPF().isPosZero(); 2286 } 2287 } 2288 return false; 2289} 2290 2291 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2292 /// the given operands. 2293 SDValue 2294 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2295 SDValue &ARMcc, SelectionDAG &DAG, 2296 DebugLoc dl) const { 2297 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2298 unsigned C = RHSC->getZExtValue(); 2299 if (!isLegalICmpImmediate(C)) { 2300 // Constant does not fit, try adjusting it by one? 2301 switch (CC) { 2302 default: break; 2303 case ISD::SETLT: 2304 case ISD::SETGE: 2305 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2306 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2307 RHS = DAG.getConstant(C-1, MVT::i32); 2308 } 2309 break; 2310 case ISD::SETULT: 2311 case ISD::SETUGE: 2312 if (C != 0 && isLegalICmpImmediate(C-1)) { 2313 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2314 RHS = DAG.getConstant(C-1, MVT::i32); 2315 } 2316 break; 2317 case ISD::SETLE: 2318 case ISD::SETGT: 2319 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2320 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2321 RHS = DAG.getConstant(C+1, MVT::i32); 2322 } 2323 break; 2324 case ISD::SETULE: 2325 case ISD::SETUGT: 2326 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2327 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2328 RHS = DAG.getConstant(C+1, MVT::i32); 2329 } 2330 break; 2331 } 2332 } 2333 } 2334 2335 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2336 ARMISD::NodeType CompareType; 2337 switch (CondCode) { 2338 default: 2339 CompareType = ARMISD::CMP; 2340 break; 2341 case ARMCC::EQ: 2342 case ARMCC::NE: 2343 // Uses only Z Flag 2344 CompareType = ARMISD::CMPZ; 2345 break; 2346 } 2347 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2348 return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS); 2349} 2350 2351 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
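/// When the RHS is +0.0 the single-operand CMPFPw0 form is used, so no zero
/// constant needs to be materialized in a VFP register.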
2352SDValue 2353ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2354 DebugLoc dl) const { 2355 SDValue Cmp; 2356 if (!isFloatingPointZero(RHS)) 2357 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS); 2358 else 2359 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS); 2360 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp); 2361} 2362 2363SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2364 SDValue Cond = Op.getOperand(0); 2365 SDValue SelectTrue = Op.getOperand(1); 2366 SDValue SelectFalse = Op.getOperand(2); 2367 DebugLoc dl = Op.getDebugLoc(); 2368 2369 // Convert: 2370 // 2371 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2372 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2373 // 2374 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2375 const ConstantSDNode *CMOVTrue = 2376 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2377 const ConstantSDNode *CMOVFalse = 2378 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2379 2380 if (CMOVTrue && CMOVFalse) { 2381 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2382 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2383 2384 SDValue True; 2385 SDValue False; 2386 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2387 True = SelectTrue; 2388 False = SelectFalse; 2389 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2390 True = SelectFalse; 2391 False = SelectTrue; 2392 } 2393 2394 if (True.getNode() && False.getNode()) { 2395 EVT VT = Cond.getValueType(); 2396 SDValue ARMcc = Cond.getOperand(2); 2397 SDValue CCR = Cond.getOperand(3); 2398 SDValue Cmp = Cond.getOperand(4); 2399 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2400 } 2401 } 2402 } 2403 2404 return DAG.getSelectCC(dl, Cond, 2405 DAG.getConstant(0, Cond.getValueType()), 2406 SelectTrue, SelectFalse, ISD::SETNE); 2407} 2408 2409SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2410 EVT VT = Op.getValueType(); 2411 SDValue LHS = Op.getOperand(0); 2412 SDValue RHS = Op.getOperand(1); 2413 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2414 SDValue TrueVal = Op.getOperand(2); 2415 SDValue FalseVal = Op.getOperand(3); 2416 DebugLoc dl = Op.getDebugLoc(); 2417 2418 if (LHS.getValueType() == MVT::i32) { 2419 SDValue ARMcc; 2420 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2421 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2422 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2423 } 2424 2425 ARMCC::CondCodes CondCode, CondCode2; 2426 FPCCToARMCC(CC, CondCode, CondCode2); 2427 2428 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2429 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2430 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2431 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2432 ARMcc, CCR, Cmp); 2433 if (CondCode2 != ARMCC::AL) { 2434 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2435 // FIXME: Needs another CMP because flag can have but one use. 2436 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2437 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2438 Result, TrueVal, ARMcc2, CCR, Cmp2); 2439 } 2440 return Result; 2441} 2442 2443/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2444/// to morph to an integer compare sequence. 
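/// Only single-use operands that are either +0.0 or plain loads qualify, so
/// the integer sequence never has to move a live value out of VFP registers.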
2445 static bool canChangeToInt(SDValue Op, bool &SeenZero, 2446 const ARMSubtarget *Subtarget) { 2447 SDNode *N = Op.getNode(); 2448 if (!N->hasOneUse()) 2449 // Otherwise it requires moving the value from fp to integer registers. 2450 return false; 2451 if (!N->getNumValues()) 2452 return false; 2453 EVT VT = Op.getValueType(); 2454 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2455 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2456 // vmrs are very slow, e.g. cortex-a8. 2457 return false; 2458 2459 if (isFloatingPointZero(Op)) { 2460 SeenZero = true; 2461 return true; 2462 } 2463 return ISD::isNormalLoad(N); 2464} 2465 2466 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2467 if (isFloatingPointZero(Op)) 2468 return DAG.getConstant(0, MVT::i32); 2469 2470 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2471 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2472 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2473 Ld->isVolatile(), Ld->isNonTemporal(), 2474 Ld->getAlignment()); 2475 2476 llvm_unreachable("Unknown VFP cmp argument!"); 2477} 2478 2479 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2480 SDValue &RetVal1, SDValue &RetVal2) { 2481 if (isFloatingPointZero(Op)) { 2482 RetVal1 = DAG.getConstant(0, MVT::i32); 2483 RetVal2 = DAG.getConstant(0, MVT::i32); 2484 return; 2485 } 2486 2487 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2488 SDValue Ptr = Ld->getBasePtr(); 2489 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2490 Ld->getChain(), Ptr, 2491 Ld->getPointerInfo(), 2492 Ld->isVolatile(), Ld->isNonTemporal(), 2493 Ld->getAlignment()); 2494 2495 EVT PtrType = Ptr.getValueType(); 2496 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2497 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2498 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2499 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2500 Ld->getChain(), NewPtr, 2501 Ld->getPointerInfo().getWithOffset(4), 2502 Ld->isVolatile(), Ld->isNonTemporal(), 2503 NewAlign); 2504 return; 2505 } 2506 2507 llvm_unreachable("Unknown VFP cmp argument!"); 2508} 2509 2510 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2511 /// f32 and even f64 comparisons to integer ones. 2512 SDValue 2513 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2514 SDValue Chain = Op.getOperand(0); 2515 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2516 SDValue LHS = Op.getOperand(2); 2517 SDValue RHS = Op.getOperand(3); 2518 SDValue Dest = Op.getOperand(4); 2519 DebugLoc dl = Op.getDebugLoc(); 2520 2521 bool SeenZero = false; 2522 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2523 canChangeToInt(RHS, SeenZero, Subtarget) && 2524 // If one of the operands is zero, it's safe to ignore the NaN case since 2525 // we only care about equality comparisons. 2526 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2527 // If unsafe fp math optimization is enabled and there are no other uses of 2528 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2529 // to an integer comparison.
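// f32 operands are reloaded as their i32 bit patterns; f64 operands are
// split into two i32 loads and compared with the BCC_i64 pseudo node below.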
2530 if (CC == ISD::SETOEQ) 2531 CC = ISD::SETEQ; 2532 else if (CC == ISD::SETUNE) 2533 CC = ISD::SETNE; 2534 2535 SDValue ARMcc; 2536 if (LHS.getValueType() == MVT::f32) { 2537 LHS = bitcastf32Toi32(LHS, DAG); 2538 RHS = bitcastf32Toi32(RHS, DAG); 2539 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2540 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2541 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2542 Chain, Dest, ARMcc, CCR, Cmp); 2543 } 2544 2545 SDValue LHS1, LHS2; 2546 SDValue RHS1, RHS2; 2547 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2548 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2549 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2550 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2551 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2552 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2553 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2554 } 2555 2556 return SDValue(); 2557} 2558 2559SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2560 SDValue Chain = Op.getOperand(0); 2561 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2562 SDValue LHS = Op.getOperand(2); 2563 SDValue RHS = Op.getOperand(3); 2564 SDValue Dest = Op.getOperand(4); 2565 DebugLoc dl = Op.getDebugLoc(); 2566 2567 if (LHS.getValueType() == MVT::i32) { 2568 SDValue ARMcc; 2569 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2570 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2571 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2572 Chain, Dest, ARMcc, CCR, Cmp); 2573 } 2574 2575 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2576 2577 if (UnsafeFPMath && 2578 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2579 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2580 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2581 if (Result.getNode()) 2582 return Result; 2583 } 2584 2585 ARMCC::CondCodes CondCode, CondCode2; 2586 FPCCToARMCC(CC, CondCode, CondCode2); 2587 2588 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2589 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2590 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2591 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2592 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2593 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2594 if (CondCode2 != ARMCC::AL) { 2595 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2596 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2597 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2598 } 2599 return Res; 2600} 2601 2602SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2603 SDValue Chain = Op.getOperand(0); 2604 SDValue Table = Op.getOperand(1); 2605 SDValue Index = Op.getOperand(2); 2606 DebugLoc dl = Op.getDebugLoc(); 2607 2608 EVT PTy = getPointerTy(); 2609 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2610 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2611 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2612 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2613 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2614 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2615 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2616 if (Subtarget->isThumb2()) { 2617 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2618 // which does another jump to the destination. 
This also makes it easier 2619 // to translate it to TBB / TBH later. 2620 // FIXME: This might not work if the function is extremely large. 2621 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2622 Addr, Op.getOperand(2), JTI, UId); 2623 } 2624 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2625 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2626 MachinePointerInfo::getJumpTable(), 2627 false, false, 0); 2628 Chain = Addr.getValue(1); 2629 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2630 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2631 } else { 2632 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2633 MachinePointerInfo::getJumpTable(), false, false, 0); 2634 Chain = Addr.getValue(1); 2635 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2636 } 2637} 2638 2639static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2640 DebugLoc dl = Op.getDebugLoc(); 2641 unsigned Opc; 2642 2643 switch (Op.getOpcode()) { 2644 default: 2645 assert(0 && "Invalid opcode!"); 2646 case ISD::FP_TO_SINT: 2647 Opc = ARMISD::FTOSI; 2648 break; 2649 case ISD::FP_TO_UINT: 2650 Opc = ARMISD::FTOUI; 2651 break; 2652 } 2653 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2654 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); 2655} 2656 2657static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2658 EVT VT = Op.getValueType(); 2659 DebugLoc dl = Op.getDebugLoc(); 2660 unsigned Opc; 2661 2662 switch (Op.getOpcode()) { 2663 default: 2664 assert(0 && "Invalid opcode!"); 2665 case ISD::SINT_TO_FP: 2666 Opc = ARMISD::SITOF; 2667 break; 2668 case ISD::UINT_TO_FP: 2669 Opc = ARMISD::UITOF; 2670 break; 2671 } 2672 2673 Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0)); 2674 return DAG.getNode(Opc, dl, VT, Op); 2675} 2676 2677SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2678 // Implement fcopysign with a fabs and a conditional fneg. 2679 SDValue Tmp0 = Op.getOperand(0); 2680 SDValue Tmp1 = Op.getOperand(1); 2681 DebugLoc dl = Op.getDebugLoc(); 2682 EVT VT = Op.getValueType(); 2683 EVT SrcVT = Tmp1.getValueType(); 2684 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2685 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2686 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2687 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2688 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2689 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2690} 2691 2692SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2693 MachineFunction &MF = DAG.getMachineFunction(); 2694 MachineFrameInfo *MFI = MF.getFrameInfo(); 2695 MFI->setReturnAddressIsTaken(true); 2696 2697 EVT VT = Op.getValueType(); 2698 DebugLoc dl = Op.getDebugLoc(); 2699 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2700 if (Depth) { 2701 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2702 SDValue Offset = DAG.getConstant(4, MVT::i32); 2703 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2704 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2705 MachinePointerInfo(), false, false, 0); 2706 } 2707 2708 // Return LR, which contains the return address. Mark it an implicit live-in. 
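// (For Depth > 0 the return address was loaded above at frame pointer + 4 of
// the requested ancestor frame.)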
2709 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2710 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2711} 2712 2713SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2714 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2715 MFI->setFrameAddressIsTaken(true); 2716 2717 EVT VT = Op.getValueType(); 2718 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2719 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2720 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2721 ? ARM::R7 : ARM::R11; 2722 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2723 while (Depth--) 2724 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2725 MachinePointerInfo(), 2726 false, false, 0); 2727 return FrameAddr; 2728} 2729 2730/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to 2731/// expand a bit convert where either the source or destination type is i64 to 2732/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2733/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2734/// vectors), since the legalizer won't know what to do with that. 2735static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) { 2736 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2737 DebugLoc dl = N->getDebugLoc(); 2738 SDValue Op = N->getOperand(0); 2739 2740 // This function is only supposed to be called for i64 types, either as the 2741 // source or destination of the bit convert. 2742 EVT SrcVT = Op.getValueType(); 2743 EVT DstVT = N->getValueType(0); 2744 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2745 "ExpandBIT_CONVERT called for non-i64 type"); 2746 2747 // Turn i64->f64 into VMOVDRR. 2748 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2749 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2750 DAG.getConstant(0, MVT::i32)); 2751 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2752 DAG.getConstant(1, MVT::i32)); 2753 return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT, 2754 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2755 } 2756 2757 // Turn f64->i64 into VMOVRRD. 2758 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2759 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2760 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2761 // Merge the pieces into a single i64 value. 2762 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2763 } 2764 2765 return SDValue(); 2766} 2767 2768/// getZeroVector - Returns a vector of specified type with all zero elements. 2769/// Zero vectors are used to represent vector negation and in those cases 2770/// will be implemented with the NEON VNEG instruction. However, VNEG does 2771/// not support i64 elements, so sometimes the zero vectors will need to be 2772/// explicitly constructed. Regardless, use a canonical VMOV to create the 2773/// zero vector. 2774static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2775 assert(VT.isVector() && "Expected a vector type"); 2776 // The canonical modified immediate encoding of a zero vector is....0! 2777 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2778 EVT VmovVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 2779 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2780 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 2781} 2782 2783/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 2784/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2785SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2786 SelectionDAG &DAG) const { 2787 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2788 EVT VT = Op.getValueType(); 2789 unsigned VTBits = VT.getSizeInBits(); 2790 DebugLoc dl = Op.getDebugLoc(); 2791 SDValue ShOpLo = Op.getOperand(0); 2792 SDValue ShOpHi = Op.getOperand(1); 2793 SDValue ShAmt = Op.getOperand(2); 2794 SDValue ARMcc; 2795 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2796 2797 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2798 2799 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2800 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2801 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2802 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2803 DAG.getConstant(VTBits, MVT::i32)); 2804 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2805 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2806 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2807 2808 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2809 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2810 ARMcc, DAG, dl); 2811 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2812 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2813 CCR, Cmp); 2814 2815 SDValue Ops[2] = { Lo, Hi }; 2816 return DAG.getMergeValues(Ops, 2, dl); 2817} 2818 2819/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2820/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2821SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2822 SelectionDAG &DAG) const { 2823 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2824 EVT VT = Op.getValueType(); 2825 unsigned VTBits = VT.getSizeInBits(); 2826 DebugLoc dl = Op.getDebugLoc(); 2827 SDValue ShOpLo = Op.getOperand(0); 2828 SDValue ShOpHi = Op.getOperand(1); 2829 SDValue ShAmt = Op.getOperand(2); 2830 SDValue ARMcc; 2831 2832 assert(Op.getOpcode() == ISD::SHL_PARTS); 2833 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2834 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2835 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2836 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2837 DAG.getConstant(VTBits, MVT::i32)); 2838 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2839 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2840 2841 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2842 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2843 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2844 ARMcc, DAG, dl); 2845 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2846 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2847 CCR, Cmp); 2848 2849 SDValue Ops[2] = { Lo, Hi }; 2850 return DAG.getMergeValues(Ops, 2, dl); 2851} 2852 2853SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2854 SelectionDAG &DAG) const { 2855 // The rounding mode is in bits 23:22 of the FPSCR. 
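// For reference: FPSCR.RMode is 0 = to nearest, 1 = toward +infinity,
// 2 = toward -infinity, 3 = toward zero, while FLT_ROUNDS uses 0 = toward zero,
// 1 = to nearest, 2 = toward +infinity, 3 = toward -infinity, so the mapping
// below is simply (RMode + 1) & 3.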
2856 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
2857 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
2858 // so that the shift and the AND get folded into a bitfield extract.
2859 DebugLoc dl = Op.getDebugLoc();
2860 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
2861 DAG.getConstant(Intrinsic::arm_get_fpscr,
2862 MVT::i32));
2863 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
2864 DAG.getConstant(1U << 22, MVT::i32));
2865 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
2866 DAG.getConstant(22, MVT::i32));
2867 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
2868 DAG.getConstant(3, MVT::i32));
2869}
2870
2871static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
2872 const ARMSubtarget *ST) {
2873 EVT VT = N->getValueType(0);
2874 DebugLoc dl = N->getDebugLoc();
2875
2876 if (!ST->hasV6T2Ops())
2877 return SDValue();
2878
2879 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
2880 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
2881}
2882
2883static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
2884 const ARMSubtarget *ST) {
2885 EVT VT = N->getValueType(0);
2886 DebugLoc dl = N->getDebugLoc();
2887
2888 // Lower vector shifts on NEON to use VSHL.
2889 if (VT.isVector()) {
2890 assert(ST->hasNEON() && "unexpected vector shift");
2891
2892 // Left shifts translate directly to the vshiftu intrinsic.
2893 if (N->getOpcode() == ISD::SHL)
2894 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2895 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
2896 N->getOperand(0), N->getOperand(1));
2897
2898 assert((N->getOpcode() == ISD::SRA ||
2899 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
2900
2901 // NEON uses the same intrinsics for both left and right shifts. For
2902 // right shifts, the shift amounts are negative, so negate the vector of
2903 // shift amounts.
2904 EVT ShiftVT = N->getOperand(1).getValueType();
2905 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
2906 getZeroVector(ShiftVT, DAG, dl),
2907 N->getOperand(1));
2908 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
2909 Intrinsic::arm_neon_vshifts :
2910 Intrinsic::arm_neon_vshiftu);
2911 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
2912 DAG.getConstant(vshiftInt, MVT::i32),
2913 N->getOperand(0), NegatedCount);
2914 }
2915
2916 // We can get here for a node like i32 = ISD::SHL i32, i64
2917 if (VT != MVT::i64)
2918 return SDValue();
2919
2920 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
2921 "Unknown shift to lower!");
2922
2923 // We only lower SRA, SRL of 1 here, all others use generic lowering.
2924 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
2925 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
2926 return SDValue();
2927
2928 // If we are in Thumb1 mode, we don't have RRX.
2929 if (ST->isThumb1Only()) return SDValue();
2930
2931 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
2932 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2933 DAG.getConstant(0, MVT::i32));
2934 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
2935 DAG.getConstant(1, MVT::i32));
2936
2937 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
2938 // captures the shifted-out bit in the carry flag.
2939 unsigned Opc = N->getOpcode() == ISD::SRL ?
ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 2940 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1); 2941 2942 // The low part is an ARMISD::RRX operand, which shifts the carry in. 2943 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 2944 2945 // Merge the pieces into a single i64 value. 2946 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 2947} 2948 2949static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 2950 SDValue TmpOp0, TmpOp1; 2951 bool Invert = false; 2952 bool Swap = false; 2953 unsigned Opc = 0; 2954 2955 SDValue Op0 = Op.getOperand(0); 2956 SDValue Op1 = Op.getOperand(1); 2957 SDValue CC = Op.getOperand(2); 2958 EVT VT = Op.getValueType(); 2959 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 2960 DebugLoc dl = Op.getDebugLoc(); 2961 2962 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 2963 switch (SetCCOpcode) { 2964 default: llvm_unreachable("Illegal FP comparison"); break; 2965 case ISD::SETUNE: 2966 case ISD::SETNE: Invert = true; // Fallthrough 2967 case ISD::SETOEQ: 2968 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 2969 case ISD::SETOLT: 2970 case ISD::SETLT: Swap = true; // Fallthrough 2971 case ISD::SETOGT: 2972 case ISD::SETGT: Opc = ARMISD::VCGT; break; 2973 case ISD::SETOLE: 2974 case ISD::SETLE: Swap = true; // Fallthrough 2975 case ISD::SETOGE: 2976 case ISD::SETGE: Opc = ARMISD::VCGE; break; 2977 case ISD::SETUGE: Swap = true; // Fallthrough 2978 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 2979 case ISD::SETUGT: Swap = true; // Fallthrough 2980 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 2981 case ISD::SETUEQ: Invert = true; // Fallthrough 2982 case ISD::SETONE: 2983 // Expand this to (OLT | OGT). 2984 TmpOp0 = Op0; 2985 TmpOp1 = Op1; 2986 Opc = ISD::OR; 2987 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 2988 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 2989 break; 2990 case ISD::SETUO: Invert = true; // Fallthrough 2991 case ISD::SETO: 2992 // Expand this to (OLT | OGE). 2993 TmpOp0 = Op0; 2994 TmpOp1 = Op1; 2995 Opc = ISD::OR; 2996 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 2997 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 2998 break; 2999 } 3000 } else { 3001 // Integer comparisons. 3002 switch (SetCCOpcode) { 3003 default: llvm_unreachable("Illegal integer comparison"); break; 3004 case ISD::SETNE: Invert = true; 3005 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3006 case ISD::SETLT: Swap = true; 3007 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3008 case ISD::SETLE: Swap = true; 3009 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3010 case ISD::SETULT: Swap = true; 3011 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3012 case ISD::SETULE: Swap = true; 3013 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3014 } 3015 3016 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3017 if (Opc == ARMISD::VCEQ) { 3018 3019 SDValue AndOp; 3020 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3021 AndOp = Op0; 3022 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3023 AndOp = Op1; 3024 3025 // Ignore bitconvert. 
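// (The AND may be hidden behind a BIT_CONVERT, so look through it before
// checking for the VTST pattern.)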
3026 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
3027 AndOp = AndOp.getOperand(0);
3028
3029 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
3030 Opc = ARMISD::VTST;
3031 Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
3032 Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
3033 Invert = !Invert;
3034 }
3035 }
3036 }
3037
3038 if (Swap)
3039 std::swap(Op0, Op1);
3040
3041 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
3042
3043 if (Invert)
3044 Result = DAG.getNOT(dl, Result, VT);
3045
3046 return Result;
3047}
3048
3049/// isNEONModifiedImm - Check if the specified splat value corresponds to a
3050/// valid vector constant for a NEON instruction with a "modified immediate"
3051/// operand (e.g., VMOV). If so, return the encoded value.
3052static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3053 unsigned SplatBitSize, SelectionDAG &DAG,
3054 EVT &VT, bool is128Bits, bool isVMOV) {
3055 unsigned OpCmode, Imm;
3056
3057 // SplatBitSize is set to the smallest size that splats the vector, so a
3058 // zero vector will always have SplatBitSize == 8. However, NEON modified
3059 // immediate instructions other than VMOV do not support the 8-bit encoding
3060 // of a zero vector, and the default encoding of zero is supposed to be the
3061 // 32-bit version.
3062 if (SplatBits == 0)
3063 SplatBitSize = 32;
3064
3065 switch (SplatBitSize) {
3066 case 8:
3067 if (!isVMOV)
3068 return SDValue();
3069 // Any 1-byte value is OK. Op=0, Cmode=1110.
3070 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3071 OpCmode = 0xe;
3072 Imm = SplatBits;
3073 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3074 break;
3075
3076 case 16:
3077 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
3078 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3079 if ((SplatBits & ~0xff) == 0) {
3080 // Value = 0x00nn: Op=x, Cmode=100x.
3081 OpCmode = 0x8;
3082 Imm = SplatBits;
3083 break;
3084 }
3085 if ((SplatBits & ~0xff00) == 0) {
3086 // Value = 0xnn00: Op=x, Cmode=101x.
3087 OpCmode = 0xa;
3088 Imm = SplatBits >> 8;
3089 break;
3090 }
3091 return SDValue();
3092
3093 case 32:
3094 // NEON's 32-bit VMOV supports splat values where:
3095 // * only one byte is nonzero, or
3096 // * the least significant byte is 0xff and the second byte is nonzero, or
3097 // * the least significant 2 bytes are 0xff and the third is nonzero.
3098 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3099 if ((SplatBits & ~0xff) == 0) {
3100 // Value = 0x000000nn: Op=x, Cmode=000x.
3101 OpCmode = 0;
3102 Imm = SplatBits;
3103 break;
3104 }
3105 if ((SplatBits & ~0xff00) == 0) {
3106 // Value = 0x0000nn00: Op=x, Cmode=001x.
3107 OpCmode = 0x2;
3108 Imm = SplatBits >> 8;
3109 break;
3110 }
3111 if ((SplatBits & ~0xff0000) == 0) {
3112 // Value = 0x00nn0000: Op=x, Cmode=010x.
3113 OpCmode = 0x4;
3114 Imm = SplatBits >> 16;
3115 break;
3116 }
3117 if ((SplatBits & ~0xff000000) == 0) {
3118 // Value = 0xnn000000: Op=x, Cmode=011x.
3119 OpCmode = 0x6;
3120 Imm = SplatBits >> 24;
3121 break;
3122 }
3123
3124 if ((SplatBits & ~0xffff) == 0 &&
3125 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3126 // Value = 0x0000nnff: Op=x, Cmode=1100.
3127 OpCmode = 0xc;
3128 Imm = SplatBits >> 8;
3129 SplatBits |= 0xff;
3130 break;
3131 }
3132
3133 if ((SplatBits & ~0xffffff) == 0 &&
3134 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3135 // Value = 0x00nnffff: Op=x, Cmode=1101.
3136 OpCmode = 0xd; 3137 Imm = SplatBits >> 16; 3138 SplatBits |= 0xffff; 3139 break; 3140 } 3141 3142 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3143 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3144 // VMOV.I32. A (very) minor optimization would be to replicate the value 3145 // and fall through here to test for a valid 64-bit splat. But, then the 3146 // caller would also need to check and handle the change in size. 3147 return SDValue(); 3148 3149 case 64: { 3150 if (!isVMOV) 3151 return SDValue(); 3152 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3153 uint64_t BitMask = 0xff; 3154 uint64_t Val = 0; 3155 unsigned ImmMask = 1; 3156 Imm = 0; 3157 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3158 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3159 Val |= BitMask; 3160 Imm |= ImmMask; 3161 } else if ((SplatBits & BitMask) != 0) { 3162 return SDValue(); 3163 } 3164 BitMask <<= 8; 3165 ImmMask <<= 1; 3166 } 3167 // Op=1, Cmode=1110. 3168 OpCmode = 0x1e; 3169 SplatBits = Val; 3170 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3171 break; 3172 } 3173 3174 default: 3175 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3176 return SDValue(); 3177 } 3178 3179 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3180 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3181} 3182 3183static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3184 bool &ReverseVEXT, unsigned &Imm) { 3185 unsigned NumElts = VT.getVectorNumElements(); 3186 ReverseVEXT = false; 3187 3188 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3189 if (M[0] < 0) 3190 return false; 3191 3192 Imm = M[0]; 3193 3194 // If this is a VEXT shuffle, the immediate value is the index of the first 3195 // element. The other shuffle indices must be the successive elements after 3196 // the first one. 3197 unsigned ExpectedElt = Imm; 3198 for (unsigned i = 1; i < NumElts; ++i) { 3199 // Increment the expected index. If it wraps around, it may still be 3200 // a VEXT but the source vectors must be swapped. 3201 ExpectedElt += 1; 3202 if (ExpectedElt == NumElts * 2) { 3203 ExpectedElt = 0; 3204 ReverseVEXT = true; 3205 } 3206 3207 if (M[i] < 0) continue; // ignore UNDEF indices 3208 if (ExpectedElt != static_cast<unsigned>(M[i])) 3209 return false; 3210 } 3211 3212 // Adjust the index value if the source operands will be swapped. 3213 if (ReverseVEXT) 3214 Imm -= NumElts; 3215 3216 return true; 3217} 3218 3219/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3220/// instruction with the specified blocksize. (The order of the elements 3221/// within each block of the vector is reversed.) 3222static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3223 unsigned BlockSize) { 3224 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3225 "Only possible block sizes for VREV are: 16, 32, 64"); 3226 3227 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3228 if (EltSz == 64) 3229 return false; 3230 3231 unsigned NumElts = VT.getVectorNumElements(); 3232 unsigned BlockElts = M[0] + 1; 3233 // If the first shuffle index is UNDEF, be optimistic. 
3234 if (M[0] < 0) 3235 BlockElts = BlockSize / EltSz; 3236 3237 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3238 return false; 3239 3240 for (unsigned i = 0; i < NumElts; ++i) { 3241 if (M[i] < 0) continue; // ignore UNDEF indices 3242 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3243 return false; 3244 } 3245 3246 return true; 3247} 3248 3249static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3250 unsigned &WhichResult) { 3251 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3252 if (EltSz == 64) 3253 return false; 3254 3255 unsigned NumElts = VT.getVectorNumElements(); 3256 WhichResult = (M[0] == 0 ? 0 : 1); 3257 for (unsigned i = 0; i < NumElts; i += 2) { 3258 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3259 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3260 return false; 3261 } 3262 return true; 3263} 3264 3265/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3266/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3267/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3268static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3269 unsigned &WhichResult) { 3270 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3271 if (EltSz == 64) 3272 return false; 3273 3274 unsigned NumElts = VT.getVectorNumElements(); 3275 WhichResult = (M[0] == 0 ? 0 : 1); 3276 for (unsigned i = 0; i < NumElts; i += 2) { 3277 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3278 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3279 return false; 3280 } 3281 return true; 3282} 3283 3284static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3285 unsigned &WhichResult) { 3286 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3287 if (EltSz == 64) 3288 return false; 3289 3290 unsigned NumElts = VT.getVectorNumElements(); 3291 WhichResult = (M[0] == 0 ? 0 : 1); 3292 for (unsigned i = 0; i != NumElts; ++i) { 3293 if (M[i] < 0) continue; // ignore UNDEF indices 3294 if ((unsigned) M[i] != 2 * i + WhichResult) 3295 return false; 3296 } 3297 3298 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3299 if (VT.is64BitVector() && EltSz == 32) 3300 return false; 3301 3302 return true; 3303} 3304 3305/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3306/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3307/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 3308static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3309 unsigned &WhichResult) { 3310 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3311 if (EltSz == 64) 3312 return false; 3313 3314 unsigned Half = VT.getVectorNumElements() / 2; 3315 WhichResult = (M[0] == 0 ? 0 : 1); 3316 for (unsigned j = 0; j != 2; ++j) { 3317 unsigned Idx = WhichResult; 3318 for (unsigned i = 0; i != Half; ++i) { 3319 int MIdx = M[i + j * Half]; 3320 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3321 return false; 3322 Idx += 2; 3323 } 3324 } 3325 3326 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3327 if (VT.is64BitVector() && EltSz == 32) 3328 return false; 3329 3330 return true; 3331} 3332 3333static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3334 unsigned &WhichResult) { 3335 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3336 if (EltSz == 64) 3337 return false; 3338 3339 unsigned NumElts = VT.getVectorNumElements(); 3340 WhichResult = (M[0] == 0 ? 
0 : 1); 3341 unsigned Idx = WhichResult * NumElts / 2; 3342 for (unsigned i = 0; i != NumElts; i += 2) { 3343 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3344 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3345 return false; 3346 Idx += 1; 3347 } 3348 3349 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3350 if (VT.is64BitVector() && EltSz == 32) 3351 return false; 3352 3353 return true; 3354} 3355 3356/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3357/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3358/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3359static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3360 unsigned &WhichResult) { 3361 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3362 if (EltSz == 64) 3363 return false; 3364 3365 unsigned NumElts = VT.getVectorNumElements(); 3366 WhichResult = (M[0] == 0 ? 0 : 1); 3367 unsigned Idx = WhichResult * NumElts / 2; 3368 for (unsigned i = 0; i != NumElts; i += 2) { 3369 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3370 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3371 return false; 3372 Idx += 1; 3373 } 3374 3375 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3376 if (VT.is64BitVector() && EltSz == 32) 3377 return false; 3378 3379 return true; 3380} 3381 3382// If N is an integer constant that can be moved into a register in one 3383// instruction, return an SDValue of such a constant (will become a MOV 3384// instruction). Otherwise return null. 3385static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3386 const ARMSubtarget *ST, DebugLoc dl) { 3387 uint64_t Val; 3388 if (!isa<ConstantSDNode>(N)) 3389 return SDValue(); 3390 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3391 3392 if (ST->isThumb1Only()) { 3393 if (Val <= 255 || ~Val <= 255) 3394 return DAG.getConstant(Val, MVT::i32); 3395 } else { 3396 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3397 return DAG.getConstant(Val, MVT::i32); 3398 } 3399 return SDValue(); 3400} 3401 3402// If this is a case we can't handle, return null and let the default 3403// expansion code take care of it. 3404static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3405 const ARMSubtarget *ST) { 3406 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3407 DebugLoc dl = Op.getDebugLoc(); 3408 EVT VT = Op.getValueType(); 3409 3410 APInt SplatBits, SplatUndef; 3411 unsigned SplatBitSize; 3412 bool HasAnyUndefs; 3413 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3414 if (SplatBitSize <= 64) { 3415 // Check if an immediate VMOV works. 3416 EVT VmovVT; 3417 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3418 SplatUndef.getZExtValue(), SplatBitSize, 3419 DAG, VmovVT, VT.is128BitVector(), true); 3420 if (Val.getNode()) { 3421 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3422 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3423 } 3424 3425 // Try an immediate VMVN. 3426 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3427 ((1LL << SplatBitSize) - 1)); 3428 Val = isNEONModifiedImm(NegatedImm, 3429 SplatUndef.getZExtValue(), SplatBitSize, 3430 DAG, VmovVT, VT.is128BitVector(), false); 3431 if (Val.getNode()) { 3432 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3433 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3434 } 3435 } 3436 } 3437 3438 // Scan through the operands to see if only one value is used. 
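// Along the way, also record whether every element is a constant and whether
// only the low element is non-undef; both cases get special handling below.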
3439 unsigned NumElts = VT.getVectorNumElements(); 3440 bool isOnlyLowElement = true; 3441 bool usesOnlyOneValue = true; 3442 bool isConstant = true; 3443 SDValue Value; 3444 for (unsigned i = 0; i < NumElts; ++i) { 3445 SDValue V = Op.getOperand(i); 3446 if (V.getOpcode() == ISD::UNDEF) 3447 continue; 3448 if (i > 0) 3449 isOnlyLowElement = false; 3450 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3451 isConstant = false; 3452 3453 if (!Value.getNode()) 3454 Value = V; 3455 else if (V != Value) 3456 usesOnlyOneValue = false; 3457 } 3458 3459 if (!Value.getNode()) 3460 return DAG.getUNDEF(VT); 3461 3462 if (isOnlyLowElement) 3463 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3464 3465 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3466 3467 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3468 // i32 and try again. 3469 if (usesOnlyOneValue && EltSize <= 32) { 3470 if (!isConstant) 3471 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3472 if (VT.getVectorElementType().isFloatingPoint()) { 3473 SmallVector<SDValue, 8> Ops; 3474 for (unsigned i = 0; i < NumElts; ++i) 3475 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, 3476 Op.getOperand(i))); 3477 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &Ops[0], 3478 NumElts); 3479 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3480 if (Val.getNode()) 3481 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3482 } 3483 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3484 if (Val.getNode()) 3485 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3486 } 3487 3488 // If all elements are constants and the case above didn't get hit, fall back 3489 // to the default expansion, which will generate a load from the constant 3490 // pool. 3491 if (isConstant) 3492 return SDValue(); 3493 3494 // Vectors with 32- or 64-bit elements can be built by directly assigning 3495 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3496 // will be legalized. 3497 if (EltSize >= 32) { 3498 // Do the expansion with floating-point types, since that is what the VFP 3499 // registers are defined to use, and since i64 is not legal. 3500 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3501 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3502 SmallVector<SDValue, 8> Ops; 3503 for (unsigned i = 0; i < NumElts; ++i) 3504 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i))); 3505 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3506 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3507 } 3508 3509 return SDValue(); 3510} 3511 3512/// isShuffleMaskLegal - Targets can use this to indicate that they only 3513/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3514/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3515/// are assumed to be legal. 3516bool 3517ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3518 EVT VT) const { 3519 if (VT.getVectorNumElements() == 4 && 3520 (VT.is128BitVector() || VT.is64BitVector())) { 3521 unsigned PFIndexes[4]; 3522 for (unsigned i = 0; i != 4; ++i) { 3523 if (M[i] < 0) 3524 PFIndexes[i] = 8; 3525 else 3526 PFIndexes[i] = M[i]; 3527 } 3528 3529 // Compute the index in the perfect shuffle table. 
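// Each mask index is in the range [0,8] (8 encodes an undef lane), so the
// four indices are packed as the digits of a base-9 number.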
3530 unsigned PFTableIndex = 3531 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3532 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3533 unsigned Cost = (PFEntry >> 30); 3534 3535 if (Cost <= 4) 3536 return true; 3537 } 3538 3539 bool ReverseVEXT; 3540 unsigned Imm, WhichResult; 3541 3542 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3543 return (EltSize >= 32 || 3544 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3545 isVREVMask(M, VT, 64) || 3546 isVREVMask(M, VT, 32) || 3547 isVREVMask(M, VT, 16) || 3548 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3549 isVTRNMask(M, VT, WhichResult) || 3550 isVUZPMask(M, VT, WhichResult) || 3551 isVZIPMask(M, VT, WhichResult) || 3552 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3553 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3554 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3555} 3556 3557/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3558/// the specified operations to build the shuffle. 3559static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3560 SDValue RHS, SelectionDAG &DAG, 3561 DebugLoc dl) { 3562 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3563 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3564 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3565 3566 enum { 3567 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3568 OP_VREV, 3569 OP_VDUP0, 3570 OP_VDUP1, 3571 OP_VDUP2, 3572 OP_VDUP3, 3573 OP_VEXT1, 3574 OP_VEXT2, 3575 OP_VEXT3, 3576 OP_VUZPL, // VUZP, left result 3577 OP_VUZPR, // VUZP, right result 3578 OP_VZIPL, // VZIP, left result 3579 OP_VZIPR, // VZIP, right result 3580 OP_VTRNL, // VTRN, left result 3581 OP_VTRNR // VTRN, right result 3582 }; 3583 3584 if (OpNum == OP_COPY) { 3585 if (LHSID == (1*9+2)*9+3) return LHS; 3586 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3587 return RHS; 3588 } 3589 3590 SDValue OpLHS, OpRHS; 3591 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3592 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3593 EVT VT = OpLHS.getValueType(); 3594 3595 switch (OpNum) { 3596 default: llvm_unreachable("Unknown shuffle opcode!"); 3597 case OP_VREV: 3598 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3599 case OP_VDUP0: 3600 case OP_VDUP1: 3601 case OP_VDUP2: 3602 case OP_VDUP3: 3603 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3604 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3605 case OP_VEXT1: 3606 case OP_VEXT2: 3607 case OP_VEXT3: 3608 return DAG.getNode(ARMISD::VEXT, dl, VT, 3609 OpLHS, OpRHS, 3610 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3611 case OP_VUZPL: 3612 case OP_VUZPR: 3613 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3614 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3615 case OP_VZIPL: 3616 case OP_VZIPR: 3617 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3618 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3619 case OP_VTRNL: 3620 case OP_VTRNR: 3621 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3622 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3623 } 3624} 3625 3626static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3627 SDValue V1 = Op.getOperand(0); 3628 SDValue V2 = Op.getOperand(1); 3629 DebugLoc dl = Op.getDebugLoc(); 3630 EVT VT = Op.getValueType(); 3631 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3632 SmallVector<int, 8> ShuffleMask; 3633 3634 // Convert shuffles that are directly supported on NEON to target-specific 3635 // DAG nodes, 
instead of keeping them as shuffles and matching them again
3636 // during code selection. This is more efficient and avoids the possibility
3637 // of inconsistencies between legalization and selection.
3638 // FIXME: floating-point vectors should be canonicalized to integer vectors
3639 // of the same size so that they get CSEd properly.
3640 SVN->getMask(ShuffleMask);
3641
3642 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3643 if (EltSize <= 32) {
3644 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
3645 int Lane = SVN->getSplatIndex();
3646 // If this is an undef splat, generate it via "just" vdup, if possible.
3647 if (Lane == -1) Lane = 0;
3648
3649 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
3650 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
3651 }
3652 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
3653 DAG.getConstant(Lane, MVT::i32));
3654 }
3655
3656 bool ReverseVEXT;
3657 unsigned Imm;
3658 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
3659 if (ReverseVEXT)
3660 std::swap(V1, V2);
3661 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
3662 DAG.getConstant(Imm, MVT::i32));
3663 }
3664
3665 if (isVREVMask(ShuffleMask, VT, 64))
3666 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
3667 if (isVREVMask(ShuffleMask, VT, 32))
3668 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
3669 if (isVREVMask(ShuffleMask, VT, 16))
3670 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
3671
3672 // Check for Neon shuffles that modify both input vectors in place.
3673 // If both results are used, i.e., if there are two shuffles with the same
3674 // source operands and with masks corresponding to both results of one of
3675 // these operations, DAG memoization will ensure that a single node is
3676 // used for both shuffles.
3677 unsigned WhichResult;
3678 if (isVTRNMask(ShuffleMask, VT, WhichResult))
3679 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
3680 V1, V2).getValue(WhichResult);
3681 if (isVUZPMask(ShuffleMask, VT, WhichResult))
3682 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
3683 V1, V2).getValue(WhichResult);
3684 if (isVZIPMask(ShuffleMask, VT, WhichResult))
3685 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
3686 V1, V2).getValue(WhichResult);
3687
3688 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
3689 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
3690 V1, V1).getValue(WhichResult);
3691 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
3692 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
3693 V1, V1).getValue(WhichResult);
3694 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
3695 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
3696 V1, V1).getValue(WhichResult);
3697 }
3698
3699 // If the shuffle is not directly supported and it has 4 elements, use
3700 // the PerfectShuffle-generated table to synthesize it from other shuffles.
3701 unsigned NumElts = VT.getVectorNumElements();
3702 if (NumElts == 4) {
3703 unsigned PFIndexes[4];
3704 for (unsigned i = 0; i != 4; ++i) {
3705 if (ShuffleMask[i] < 0)
3706 PFIndexes[i] = 8;
3707 else
3708 PFIndexes[i] = ShuffleMask[i];
3709 }
3710
3711 // Compute the index in the perfect shuffle table.
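// (Same base-9 packing of mask indices as in isShuffleMaskLegal above.)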
3712 unsigned PFTableIndex = 3713 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3714 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3715 unsigned Cost = (PFEntry >> 30); 3716 3717 if (Cost <= 4) 3718 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3719 } 3720 3721 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 3722 if (EltSize >= 32) { 3723 // Do the expansion with floating-point types, since that is what the VFP 3724 // registers are defined to use, and since i64 is not legal. 3725 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3726 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3727 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1); 3728 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2); 3729 SmallVector<SDValue, 8> Ops; 3730 for (unsigned i = 0; i < NumElts; ++i) { 3731 if (ShuffleMask[i] < 0) 3732 Ops.push_back(DAG.getUNDEF(EltVT)); 3733 else 3734 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3735 ShuffleMask[i] < (int)NumElts ? V1 : V2, 3736 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 3737 MVT::i32))); 3738 } 3739 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3740 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3741 } 3742 3743 return SDValue(); 3744} 3745 3746static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 3747 EVT VT = Op.getValueType(); 3748 DebugLoc dl = Op.getDebugLoc(); 3749 SDValue Vec = Op.getOperand(0); 3750 SDValue Lane = Op.getOperand(1); 3751 assert(VT == MVT::i32 && 3752 Vec.getValueType().getVectorElementType().getSizeInBits() < 32 && 3753 "unexpected type for custom-lowering vector extract"); 3754 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 3755} 3756 3757static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 3758 // The only time a CONCAT_VECTORS operation can have legal types is when 3759 // two 64-bit vectors are concatenated to a 128-bit vector. 3760 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 3761 "unexpected CONCAT_VECTORS"); 3762 DebugLoc dl = Op.getDebugLoc(); 3763 SDValue Val = DAG.getUNDEF(MVT::v2f64); 3764 SDValue Op0 = Op.getOperand(0); 3765 SDValue Op1 = Op.getOperand(1); 3766 if (Op0.getOpcode() != ISD::UNDEF) 3767 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3768 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0), 3769 DAG.getIntPtrConstant(0)); 3770 if (Op1.getOpcode() != ISD::UNDEF) 3771 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3772 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1), 3773 DAG.getIntPtrConstant(1)); 3774 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val); 3775} 3776 3777/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or 3778/// an extending load, return the unextended value. 3779static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 3780 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 3781 return N->getOperand(0); 3782 LoadSDNode *LD = cast<LoadSDNode>(N); 3783 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 3784 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 3785 LD->isNonTemporal(), LD->getAlignment()); 3786} 3787 3788static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 3789 // Multiplications are only custom-lowered for 128-bit vectors so that 3790 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
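// NEON's VMULL multiplies two 64-bit vectors of narrow elements into a
// 128-bit vector of double-width results; ARMISD::VMULLs is the signed form
// and ARMISD::VMULLu the unsigned form.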
3791 EVT VT = Op.getValueType(); 3792 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 3793 SDNode *N0 = Op.getOperand(0).getNode(); 3794 SDNode *N1 = Op.getOperand(1).getNode(); 3795 unsigned NewOpc = 0; 3796 if ((N0->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N0)) && 3797 (N1->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N1))) { 3798 NewOpc = ARMISD::VMULLs; 3799 } else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) && 3800 (N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) { 3801 NewOpc = ARMISD::VMULLu; 3802 } else if (VT.getSimpleVT().SimpleTy == MVT::v2i64) { 3803 // Fall through to expand this. It is not legal. 3804 return SDValue(); 3805 } else { 3806 // Other vector multiplications are legal. 3807 return Op; 3808 } 3809 3810 // Legalize to a VMULL instruction. 3811 DebugLoc DL = Op.getDebugLoc(); 3812 SDValue Op0 = SkipExtension(N0, DAG); 3813 SDValue Op1 = SkipExtension(N1, DAG); 3814 3815 assert(Op0.getValueType().is64BitVector() && 3816 Op1.getValueType().is64BitVector() && 3817 "unexpected types for extended operands to VMULL"); 3818 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 3819} 3820 3821SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3822 switch (Op.getOpcode()) { 3823 default: llvm_unreachable("Don't know how to custom lower this!"); 3824 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3825 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 3826 case ISD::GlobalAddress: 3827 return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) : 3828 LowerGlobalAddressELF(Op, DAG); 3829 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3830 case ISD::SELECT: return LowerSELECT(Op, DAG); 3831 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 3832 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 3833 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 3834 case ISD::VASTART: return LowerVASTART(Op, DAG); 3835 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 3836 case ISD::SINT_TO_FP: 3837 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 3838 case ISD::FP_TO_SINT: 3839 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 3840 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 3841 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3842 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3843 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 3844 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 3845 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 3846 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 3847 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 3848 Subtarget); 3849 case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG); 3850 case ISD::SHL: 3851 case ISD::SRL: 3852 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 3853 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 3854 case ISD::SRL_PARTS: 3855 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 3856 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 3857 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 3858 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 3859 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3860 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 3861 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 3862 case 
ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
3863 case ISD::MUL: return LowerMUL(Op, DAG);
3864 }
3865 return SDValue();
3866}
3867
3868/// ReplaceNodeResults - Replace the results of a node with an illegal result
3869/// type with new values built out of custom code.
3870void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
3871 SmallVectorImpl<SDValue>&Results,
3872 SelectionDAG &DAG) const {
3873 SDValue Res;
3874 switch (N->getOpcode()) {
3875 default:
3876 llvm_unreachable("Don't know how to custom expand this!");
3877 break;
3878 case ISD::BIT_CONVERT:
3879 Res = ExpandBIT_CONVERT(N, DAG);
3880 break;
3881 case ISD::SRL:
3882 case ISD::SRA:
3883 Res = LowerShift(N, DAG, Subtarget);
3884 break;
3885 }
3886 if (Res.getNode())
3887 Results.push_back(Res);
3888}
3889
3890//===----------------------------------------------------------------------===//
3891// ARM Scheduler Hooks
3892//===----------------------------------------------------------------------===//
3893
3894MachineBasicBlock *
3895ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
3896 MachineBasicBlock *BB,
3897 unsigned Size) const {
3898 unsigned dest = MI->getOperand(0).getReg();
3899 unsigned ptr = MI->getOperand(1).getReg();
3900 unsigned oldval = MI->getOperand(2).getReg();
3901 unsigned newval = MI->getOperand(3).getReg();
3902 unsigned scratch = BB->getParent()->getRegInfo()
3903 .createVirtualRegister(ARM::GPRRegisterClass);
3904 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3905 DebugLoc dl = MI->getDebugLoc();
3906 bool isThumb2 = Subtarget->isThumb2();
3907
3908 unsigned ldrOpc, strOpc;
3909 switch (Size) {
3910 default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
3911 case 1:
3912 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
3913 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
3914 break;
3915 case 2:
3916 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
3917 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
3918 break;
3919 case 4:
3920 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
3921 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
3922 break;
3923 }
3924
3925 MachineFunction *MF = BB->getParent();
3926 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3927 MachineFunction::iterator It = BB;
3928 ++It; // insert the new blocks after the current block
3929
3930 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3931 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
3932 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3933 MF->insert(It, loop1MBB);
3934 MF->insert(It, loop2MBB);
3935 MF->insert(It, exitMBB);
3936
3937 // Transfer the remainder of BB and its successor edges to exitMBB.
3938 exitMBB->splice(exitMBB->begin(), BB,
3939 llvm::next(MachineBasicBlock::iterator(MI)),
3940 BB->end());
3941 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
3942
3943 // thisMBB:
3944 // ...
3945 // fallthrough --> loop1MBB
3946 BB->addSuccessor(loop1MBB);
3947
3948 // loop1MBB:
3949 // ldrex dest, [ptr]
3950 // cmp dest, oldval
3951 // bne exitMBB
3952 BB = loop1MBB;
3953 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
3954 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
3955 .addReg(dest).addReg(oldval));
3956 BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2Bcc : ARM::Bcc))
3957 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3958 BB->addSuccessor(loop2MBB);
3959 BB->addSuccessor(exitMBB);
3960
3961 // loop2MBB:
3962 // strex scratch, newval, [ptr]
3963 // cmp scratch, #0
3964 // bne loop1MBB
3965 BB = loop2MBB;
3966 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
3967 .addReg(ptr));
3968 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
3969 .addReg(scratch).addImm(0));
3970 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
3971 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
3972 BB->addSuccessor(loop1MBB);
3973 BB->addSuccessor(exitMBB);
3974
3975 // exitMBB:
3976 // ...
3977 BB = exitMBB;
3978
3979 MI->eraseFromParent(); // The instruction is gone now.
3980
3981 return BB;
3982}
3983
3984MachineBasicBlock *
3985ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
3986 unsigned Size, unsigned BinOpcode) const {
3987 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3988 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3989
3990 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3991 MachineFunction *MF = BB->getParent();
3992 MachineFunction::iterator It = BB;
3993 ++It;
3994
3995 unsigned dest = MI->getOperand(0).getReg();
3996 unsigned ptr = MI->getOperand(1).getReg();
3997 unsigned incr = MI->getOperand(2).getReg();
3998 DebugLoc dl = MI->getDebugLoc();
3999
4000 bool isThumb2 = Subtarget->isThumb2();
4001 unsigned ldrOpc, strOpc;
4002 switch (Size) {
4003 default: llvm_unreachable("unsupported size for AtomicBinary!");
4004 case 1:
4005 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4006 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4007 break;
4008 case 2:
4009 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4010 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4011 break;
4012 case 4:
4013 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4014 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4015 break;
4016 }
4017
4018 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4019 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4020 MF->insert(It, loopMBB);
4021 MF->insert(It, exitMBB);
4022
4023 // Transfer the remainder of BB and its successor edges to exitMBB.
4024 exitMBB->splice(exitMBB->begin(), BB,
4025 llvm::next(MachineBasicBlock::iterator(MI)),
4026 BB->end());
4027 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4028
4029 MachineRegisterInfo &RegInfo = MF->getRegInfo();
4030 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4031 unsigned scratch2 = (!BinOpcode) ? incr :
4032 RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4033
4034 // thisMBB:
4035 // ...
4036 // fallthrough --> loopMBB
4037 BB->addSuccessor(loopMBB);
4038
4039 // loopMBB:
4040 // ldrex dest, [ptr]
4041 // <binop> scratch2, dest, incr
4042 // strex scratch, scratch2, [ptr]
4043 // cmp scratch, #0
4044 // bne- loopMBB
4045 // fallthrough --> exitMBB
4046 BB = loopMBB;
4047 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4048 if (BinOpcode) {
4049 // operand order needs to go the other way for NAND
4050 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
4051 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4052 addReg(incr).addReg(dest)).addReg(0);
4053 else
4054 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4055 addReg(dest).addReg(incr)).addReg(0); 4056 } 4057 4058 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4059 .addReg(ptr)); 4060 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4061 .addReg(scratch).addImm(0)); 4062 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4063 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4064 4065 BB->addSuccessor(loopMBB); 4066 BB->addSuccessor(exitMBB); 4067 4068 // exitMBB: 4069 // ... 4070 BB = exitMBB; 4071 4072 MI->eraseFromParent(); // The instruction is gone now. 4073 4074 return BB; 4075} 4076 4077static 4078MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4079 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4080 E = MBB->succ_end(); I != E; ++I) 4081 if (*I != Succ) 4082 return *I; 4083 llvm_unreachable("Expecting a BB with two successors!"); 4084} 4085 4086MachineBasicBlock * 4087ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4088 MachineBasicBlock *BB) const { 4089 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4090 DebugLoc dl = MI->getDebugLoc(); 4091 bool isThumb2 = Subtarget->isThumb2(); 4092 switch (MI->getOpcode()) { 4093 default: 4094 MI->dump(); 4095 llvm_unreachable("Unexpected instr type to insert"); 4096 4097 case ARM::ATOMIC_LOAD_ADD_I8: 4098 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4099 case ARM::ATOMIC_LOAD_ADD_I16: 4100 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4101 case ARM::ATOMIC_LOAD_ADD_I32: 4102 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4103 4104 case ARM::ATOMIC_LOAD_AND_I8: 4105 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4106 case ARM::ATOMIC_LOAD_AND_I16: 4107 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4108 case ARM::ATOMIC_LOAD_AND_I32: 4109 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4110 4111 case ARM::ATOMIC_LOAD_OR_I8: 4112 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4113 case ARM::ATOMIC_LOAD_OR_I16: 4114 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4115 case ARM::ATOMIC_LOAD_OR_I32: 4116 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4117 4118 case ARM::ATOMIC_LOAD_XOR_I8: 4119 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4120 case ARM::ATOMIC_LOAD_XOR_I16: 4121 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4122 case ARM::ATOMIC_LOAD_XOR_I32: 4123 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4124 4125 case ARM::ATOMIC_LOAD_NAND_I8: 4126 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4127 case ARM::ATOMIC_LOAD_NAND_I16: 4128 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4129 case ARM::ATOMIC_LOAD_NAND_I32: 4130 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4131 4132 case ARM::ATOMIC_LOAD_SUB_I8: 4133 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4134 case ARM::ATOMIC_LOAD_SUB_I16: 4135 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4136 case ARM::ATOMIC_LOAD_SUB_I32: 4137 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 4138 4139 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4140 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4141 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4142 4143 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4144 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4145 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4146 4147 case ARM::tMOVCCr_pseudo: { 4148 // To "insert" a SELECT_CC instruction, we actually have to insert the 4149 // diamond control-flow pattern. The incoming instruction knows the 4150 // destination vreg to set, the condition code register to branch on, the 4151 // true/false values to select between, and a branch opcode to use. 4152 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4153 MachineFunction::iterator It = BB; 4154 ++It; 4155 4156 // thisMBB: 4157 // ... 4158 // TrueVal = ... 4159 // cmpTY ccX, r1, r2 4160 // bCC copy1MBB 4161 // fallthrough --> copy0MBB 4162 MachineBasicBlock *thisMBB = BB; 4163 MachineFunction *F = BB->getParent(); 4164 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4165 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4166 F->insert(It, copy0MBB); 4167 F->insert(It, sinkMBB); 4168 4169 // Transfer the remainder of BB and its successor edges to sinkMBB. 4170 sinkMBB->splice(sinkMBB->begin(), BB, 4171 llvm::next(MachineBasicBlock::iterator(MI)), 4172 BB->end()); 4173 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4174 4175 BB->addSuccessor(copy0MBB); 4176 BB->addSuccessor(sinkMBB); 4177 4178 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4179 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4180 4181 // copy0MBB: 4182 // %FalseValue = ... 4183 // # fallthrough to sinkMBB 4184 BB = copy0MBB; 4185 4186 // Update machine-CFG edges 4187 BB->addSuccessor(sinkMBB); 4188 4189 // sinkMBB: 4190 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4191 // ... 4192 BB = sinkMBB; 4193 BuildMI(*BB, BB->begin(), dl, 4194 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4195 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4196 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4197 4198 MI->eraseFromParent(); // The pseudo instruction is gone now. 4199 return BB; 4200 } 4201 4202 case ARM::BCCi64: 4203 case ARM::BCCZi64: { 4204 // Compare both parts that make up the double comparison separately for 4205 // equality. 4206 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4207 4208 unsigned LHS1 = MI->getOperand(1).getReg(); 4209 unsigned LHS2 = MI->getOperand(2).getReg(); 4210 if (RHSisZero) { 4211 AddDefaultPred(BuildMI(BB, dl, 4212 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4213 .addReg(LHS1).addImm(0)); 4214 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4215 .addReg(LHS2).addImm(0) 4216 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4217 } else { 4218 unsigned RHS1 = MI->getOperand(3).getReg(); 4219 unsigned RHS2 = MI->getOperand(4).getReg(); 4220 AddDefaultPred(BuildMI(BB, dl, 4221 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4222 .addReg(LHS1).addReg(RHS1)); 4223 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4224 .addReg(LHS2).addReg(RHS2) 4225 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4226 } 4227 4228 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 
3 : 5).getMBB(); 4229 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4230 if (MI->getOperand(0).getImm() == ARMCC::NE) 4231 std::swap(destMBB, exitMBB); 4232 4233 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4234 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4235 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4236 .addMBB(exitMBB); 4237 4238 MI->eraseFromParent(); // The pseudo instruction is gone now. 4239 return BB; 4240 } 4241 } 4242} 4243 4244//===----------------------------------------------------------------------===// 4245// ARM Optimization Hooks 4246//===----------------------------------------------------------------------===// 4247 4248static 4249SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 4250 TargetLowering::DAGCombinerInfo &DCI) { 4251 SelectionDAG &DAG = DCI.DAG; 4252 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4253 EVT VT = N->getValueType(0); 4254 unsigned Opc = N->getOpcode(); 4255 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 4256 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 4257 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 4258 ISD::CondCode CC = ISD::SETCC_INVALID; 4259 4260 if (isSlctCC) { 4261 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 4262 } else { 4263 SDValue CCOp = Slct.getOperand(0); 4264 if (CCOp.getOpcode() == ISD::SETCC) 4265 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 4266 } 4267 4268 bool DoXform = false; 4269 bool InvCC = false; 4270 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 4271 "Bad input!"); 4272 4273 if (LHS.getOpcode() == ISD::Constant && 4274 cast<ConstantSDNode>(LHS)->isNullValue()) { 4275 DoXform = true; 4276 } else if (CC != ISD::SETCC_INVALID && 4277 RHS.getOpcode() == ISD::Constant && 4278 cast<ConstantSDNode>(RHS)->isNullValue()) { 4279 std::swap(LHS, RHS); 4280 SDValue Op0 = Slct.getOperand(0); 4281 EVT OpVT = isSlctCC ? Op0.getValueType() : 4282 Op0.getOperand(0).getValueType(); 4283 bool isInt = OpVT.isInteger(); 4284 CC = ISD::getSetCCInverse(CC, isInt); 4285 4286 if (!TLI.isCondCodeLegal(CC, OpVT)) 4287 return SDValue(); // Inverse operator isn't legal. 4288 4289 DoXform = true; 4290 InvCC = true; 4291 } 4292 4293 if (DoXform) { 4294 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 4295 if (isSlctCC) 4296 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 4297 Slct.getOperand(0), Slct.getOperand(1), CC); 4298 SDValue CCOp = Slct.getOperand(0); 4299 if (InvCC) 4300 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 4301 CCOp.getOperand(0), CCOp.getOperand(1), CC); 4302 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4303 CCOp, OtherOp, Result); 4304 } 4305 return SDValue(); 4306} 4307 4308/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 4309/// operands N0 and N1. This is a helper for PerformADDCombine that is 4310/// called with the default operands, and if that fails, with commuted 4311/// operands. 4312static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 4313 TargetLowering::DAGCombinerInfo &DCI) { 4314 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 4315 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 4316 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 4317 if (Result.getNode()) return Result; 4318 } 4319 return SDValue(); 4320} 4321 4322/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 
4323/// 4324static SDValue PerformADDCombine(SDNode *N, 4325 TargetLowering::DAGCombinerInfo &DCI) { 4326 SDValue N0 = N->getOperand(0); 4327 SDValue N1 = N->getOperand(1); 4328 4329 // First try with the default operand order. 4330 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 4331 if (Result.getNode()) 4332 return Result; 4333 4334 // If that didn't work, try again with the operands commuted. 4335 return PerformADDCombineWithOperands(N, N1, N0, DCI); 4336} 4337 4338/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 4339/// 4340static SDValue PerformSUBCombine(SDNode *N, 4341 TargetLowering::DAGCombinerInfo &DCI) { 4342 SDValue N0 = N->getOperand(0); 4343 SDValue N1 = N->getOperand(1); 4344 4345 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 4346 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 4347 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 4348 if (Result.getNode()) return Result; 4349 } 4350 4351 return SDValue(); 4352} 4353 4354static SDValue PerformMULCombine(SDNode *N, 4355 TargetLowering::DAGCombinerInfo &DCI, 4356 const ARMSubtarget *Subtarget) { 4357 SelectionDAG &DAG = DCI.DAG; 4358 4359 if (Subtarget->isThumb1Only()) 4360 return SDValue(); 4361 4362 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 4363 return SDValue(); 4364 4365 EVT VT = N->getValueType(0); 4366 if (VT != MVT::i32) 4367 return SDValue(); 4368 4369 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4370 if (!C) 4371 return SDValue(); 4372 4373 uint64_t MulAmt = C->getZExtValue(); 4374 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 4375 ShiftAmt = ShiftAmt & (32 - 1); 4376 SDValue V = N->getOperand(0); 4377 DebugLoc DL = N->getDebugLoc(); 4378 4379 SDValue Res; 4380 MulAmt >>= ShiftAmt; 4381 if (isPowerOf2_32(MulAmt - 1)) { 4382 // (mul x, 2^N + 1) => (add (shl x, N), x) 4383 Res = DAG.getNode(ISD::ADD, DL, VT, 4384 V, DAG.getNode(ISD::SHL, DL, VT, 4385 V, DAG.getConstant(Log2_32(MulAmt-1), 4386 MVT::i32))); 4387 } else if (isPowerOf2_32(MulAmt + 1)) { 4388 // (mul x, 2^N - 1) => (sub (shl x, N), x) 4389 Res = DAG.getNode(ISD::SUB, DL, VT, 4390 DAG.getNode(ISD::SHL, DL, VT, 4391 V, DAG.getConstant(Log2_32(MulAmt+1), 4392 MVT::i32)), 4393 V); 4394 } else 4395 return SDValue(); 4396 4397 if (ShiftAmt != 0) 4398 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 4399 DAG.getConstant(ShiftAmt, MVT::i32)); 4400 4401 // Do not add new nodes to DAG combiner worklist. 4402 DCI.CombineTo(N, Res, false); 4403 return SDValue(); 4404} 4405 4406/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 4407static SDValue PerformORCombine(SDNode *N, 4408 TargetLowering::DAGCombinerInfo &DCI, 4409 const ARMSubtarget *Subtarget) { 4410 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 4411 // reasonable. 
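  // For example, (or (and A, 0xffff00ff), 0x2400) becomes (ARMbfi A, 0x24, 0xffff00ff):
  // the constant lies entirely within the bits cleared by the mask, so it is
  // simply inserted into bits [15:8] of A (case 1 below).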
4412 4413 // BFI is only available on V6T2+ 4414 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 4415 return SDValue(); 4416 4417 SelectionDAG &DAG = DCI.DAG; 4418 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 4419 DebugLoc DL = N->getDebugLoc(); 4420 // 1) or (and A, mask), val => ARMbfi A, val, mask 4421 // iff (val & mask) == val 4422 // 4423 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4424 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 4425 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4426 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 4427 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4428 // (i.e., copy a bitfield value into another bitfield of the same width) 4429 if (N0.getOpcode() != ISD::AND) 4430 return SDValue(); 4431 4432 EVT VT = N->getValueType(0); 4433 if (VT != MVT::i32) 4434 return SDValue(); 4435 4436 4437 // The value and the mask need to be constants so we can verify this is 4438 // actually a bitfield set. If the mask is 0xffff, we can do better 4439 // via a movt instruction, so don't use BFI in that case. 4440 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 4441 if (!C) 4442 return SDValue(); 4443 unsigned Mask = C->getZExtValue(); 4444 if (Mask == 0xffff) 4445 return SDValue(); 4446 SDValue Res; 4447 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 4448 if ((C = dyn_cast<ConstantSDNode>(N1))) { 4449 unsigned Val = C->getZExtValue(); 4450 if (!ARM::isBitFieldInvertedMask(Mask) || (Val & ~Mask) != Val) 4451 return SDValue(); 4452 Val >>= CountTrailingZeros_32(~Mask); 4453 4454 Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), 4455 DAG.getConstant(Val, MVT::i32), 4456 DAG.getConstant(Mask, MVT::i32)); 4457 4458 // Do not add new nodes to DAG combiner worklist. 4459 DCI.CombineTo(N, Res, false); 4460 } else if (N1.getOpcode() == ISD::AND) { 4461 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4462 C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4463 if (!C) 4464 return SDValue(); 4465 unsigned Mask2 = C->getZExtValue(); 4466 4467 if (ARM::isBitFieldInvertedMask(Mask) && 4468 ARM::isBitFieldInvertedMask(~Mask2) && 4469 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) { 4470 // The pack halfword instruction works better for masks that fit it, 4471 // so use that when it's available. 4472 if (Subtarget->hasT2ExtractPack() && 4473 (Mask == 0xffff || Mask == 0xffff0000)) 4474 return SDValue(); 4475 // 2a 4476 unsigned lsb = CountTrailingZeros_32(Mask2); 4477 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 4478 DAG.getConstant(lsb, MVT::i32)); 4479 Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), Res, 4480 DAG.getConstant(Mask, MVT::i32)); 4481 // Do not add new nodes to DAG combiner worklist. 4482 DCI.CombineTo(N, Res, false); 4483 } else if (ARM::isBitFieldInvertedMask(~Mask) && 4484 ARM::isBitFieldInvertedMask(Mask2) && 4485 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) { 4486 // The pack halfword instruction works better for masks that fit it, 4487 // so use that when it's available. 
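      // (i.e. let instruction selection form PKHBT/PKHTB instead of a shift plus BFI)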
4488 if (Subtarget->hasT2ExtractPack() && 4489 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 4490 return SDValue(); 4491 // 2b 4492 unsigned lsb = CountTrailingZeros_32(Mask); 4493 Res = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), 4494 DAG.getConstant(lsb, MVT::i32)); 4495 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 4496 DAG.getConstant(Mask2, MVT::i32)); 4497 // Do not add new nodes to DAG combiner worklist. 4498 DCI.CombineTo(N, Res, false); 4499 } 4500 } 4501 4502 return SDValue(); 4503} 4504 4505/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 4506/// ARMISD::VMOVRRD. 4507static SDValue PerformVMOVRRDCombine(SDNode *N, 4508 TargetLowering::DAGCombinerInfo &DCI) { 4509 // vmovrrd(vmovdrr x, y) -> x,y 4510 SDValue InDouble = N->getOperand(0); 4511 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 4512 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 4513 return SDValue(); 4514} 4515 4516/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 4517/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 4518static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 4519 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 4520 SDValue Op0 = N->getOperand(0); 4521 SDValue Op1 = N->getOperand(1); 4522 if (Op0.getOpcode() == ISD::BIT_CONVERT) 4523 Op0 = Op0.getOperand(0); 4524 if (Op1.getOpcode() == ISD::BIT_CONVERT) 4525 Op1 = Op1.getOperand(0); 4526 if (Op0.getOpcode() == ARMISD::VMOVRRD && 4527 Op0.getNode() == Op1.getNode() && 4528 Op0.getResNo() == 0 && Op1.getResNo() == 1) 4529 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), 4530 N->getValueType(0), Op0.getOperand(0)); 4531 return SDValue(); 4532} 4533 4534/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 4535/// ISD::BUILD_VECTOR. 4536static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG) { 4537 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 4538 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 4539 // into a pair of GPRs, which is fine when the value is used as a scalar, 4540 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 4541 if (N->getNumOperands() == 2) 4542 return PerformVMOVDRRCombine(N, DAG); 4543 4544 return SDValue(); 4545} 4546 4547/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 4548/// ISD::VECTOR_SHUFFLE. 4549static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 4550 // The LLVM shufflevector instruction does not require the shuffle mask 4551 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 4552 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 4553 // operands do not match the mask length, they are extended by concatenating 4554 // them with undef vectors. That is probably the right thing for other 4555 // targets, but for NEON it is better to concatenate two double-register 4556 // size vector operands into a single quad-register size vector. 
Do that 4557 // transformation here: 4558 // shuffle(concat(v1, undef), concat(v2, undef)) -> 4559 // shuffle(concat(v1, v2), undef) 4560 SDValue Op0 = N->getOperand(0); 4561 SDValue Op1 = N->getOperand(1); 4562 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 4563 Op1.getOpcode() != ISD::CONCAT_VECTORS || 4564 Op0.getNumOperands() != 2 || 4565 Op1.getNumOperands() != 2) 4566 return SDValue(); 4567 SDValue Concat0Op1 = Op0.getOperand(1); 4568 SDValue Concat1Op1 = Op1.getOperand(1); 4569 if (Concat0Op1.getOpcode() != ISD::UNDEF || 4570 Concat1Op1.getOpcode() != ISD::UNDEF) 4571 return SDValue(); 4572 // Skip the transformation if any of the types are illegal. 4573 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4574 EVT VT = N->getValueType(0); 4575 if (!TLI.isTypeLegal(VT) || 4576 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 4577 !TLI.isTypeLegal(Concat1Op1.getValueType())) 4578 return SDValue(); 4579 4580 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 4581 Op0.getOperand(0), Op1.getOperand(0)); 4582 // Translate the shuffle mask. 4583 SmallVector<int, 16> NewMask; 4584 unsigned NumElts = VT.getVectorNumElements(); 4585 unsigned HalfElts = NumElts/2; 4586 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 4587 for (unsigned n = 0; n < NumElts; ++n) { 4588 int MaskElt = SVN->getMaskElt(n); 4589 int NewElt = -1; 4590 if (MaskElt < (int)HalfElts) 4591 NewElt = MaskElt; 4592 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 4593 NewElt = HalfElts + MaskElt - NumElts; 4594 NewMask.push_back(NewElt); 4595 } 4596 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 4597 DAG.getUNDEF(VT), NewMask.data()); 4598} 4599 4600/// PerformVDUPLANECombine - Target-specific dag combine xforms for 4601/// ARMISD::VDUPLANE. 4602static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) { 4603 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 4604 // redundant. 4605 SDValue Op = N->getOperand(0); 4606 EVT VT = N->getValueType(0); 4607 4608 // Ignore bit_converts. 4609 while (Op.getOpcode() == ISD::BIT_CONVERT) 4610 Op = Op.getOperand(0); 4611 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 4612 return SDValue(); 4613 4614 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 4615 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 4616 // The canonical VMOV for a zero vector uses a 32-bit element size. 4617 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4618 unsigned EltBits; 4619 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 4620 EltSize = 8; 4621 if (EltSize > VT.getVectorElementType().getSizeInBits()) 4622 return SDValue(); 4623 4624 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op); 4625} 4626 4627/// getVShiftImm - Check if this is a valid build_vector for the immediate 4628/// operand of a vector shift operation, where all the elements of the 4629/// build_vector must have the same constant integer value. 4630static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 4631 // Ignore bit_converts. 4632 while (Op.getOpcode() == ISD::BIT_CONVERT) 4633 Op = Op.getOperand(0); 4634 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 4635 APInt SplatBits, SplatUndef; 4636 unsigned SplatBitSize; 4637 bool HasAnyUndefs; 4638 if (! BVN || ! 
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 4639 HasAnyUndefs, ElementBits) || 4640 SplatBitSize > ElementBits) 4641 return false; 4642 Cnt = SplatBits.getSExtValue(); 4643 return true; 4644} 4645 4646/// isVShiftLImm - Check if this is a valid build_vector for the immediate 4647/// operand of a vector shift left operation. That value must be in the range: 4648/// 0 <= Value < ElementBits for a left shift; or 4649/// 0 <= Value <= ElementBits for a long left shift. 4650static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 4651 assert(VT.isVector() && "vector shift count is not a vector type"); 4652 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 4653 if (! getVShiftImm(Op, ElementBits, Cnt)) 4654 return false; 4655 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 4656} 4657 4658/// isVShiftRImm - Check if this is a valid build_vector for the immediate 4659/// operand of a vector shift right operation. For a shift opcode, the value 4660/// is positive, but for an intrinsic the value count must be negative. The 4661/// absolute value must be in the range: 4662/// 1 <= |Value| <= ElementBits for a right shift; or 4663/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 4664static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 4665 int64_t &Cnt) { 4666 assert(VT.isVector() && "vector shift count is not a vector type"); 4667 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 4668 if (! getVShiftImm(Op, ElementBits, Cnt)) 4669 return false; 4670 if (isIntrinsic) 4671 Cnt = -Cnt; 4672 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 4673} 4674 4675/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 4676static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 4677 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 4678 switch (IntNo) { 4679 default: 4680 // Don't do anything for most intrinsics. 4681 break; 4682 4683 // Vector shifts: check for immediate versions and lower them. 4684 // Note: This is done during DAG combining instead of DAG legalizing because 4685 // the build_vectors for 64-bit vector element shift counts are generally 4686 // not legal, and it is hard to see their values after they get legalized to 4687 // loads from a constant pool. 4688 case Intrinsic::arm_neon_vshifts: 4689 case Intrinsic::arm_neon_vshiftu: 4690 case Intrinsic::arm_neon_vshiftls: 4691 case Intrinsic::arm_neon_vshiftlu: 4692 case Intrinsic::arm_neon_vshiftn: 4693 case Intrinsic::arm_neon_vrshifts: 4694 case Intrinsic::arm_neon_vrshiftu: 4695 case Intrinsic::arm_neon_vrshiftn: 4696 case Intrinsic::arm_neon_vqshifts: 4697 case Intrinsic::arm_neon_vqshiftu: 4698 case Intrinsic::arm_neon_vqshiftsu: 4699 case Intrinsic::arm_neon_vqshiftns: 4700 case Intrinsic::arm_neon_vqshiftnu: 4701 case Intrinsic::arm_neon_vqshiftnsu: 4702 case Intrinsic::arm_neon_vqrshiftns: 4703 case Intrinsic::arm_neon_vqrshiftnu: 4704 case Intrinsic::arm_neon_vqrshiftnsu: { 4705 EVT VT = N->getOperand(1).getValueType(); 4706 int64_t Cnt; 4707 unsigned VShiftOpc = 0; 4708 4709 switch (IntNo) { 4710 case Intrinsic::arm_neon_vshifts: 4711 case Intrinsic::arm_neon_vshiftu: 4712 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 4713 VShiftOpc = ARMISD::VSHL; 4714 break; 4715 } 4716 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 4717 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
4718 ARMISD::VSHRs : ARMISD::VSHRu); 4719 break; 4720 } 4721 return SDValue(); 4722 4723 case Intrinsic::arm_neon_vshiftls: 4724 case Intrinsic::arm_neon_vshiftlu: 4725 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 4726 break; 4727 llvm_unreachable("invalid shift count for vshll intrinsic"); 4728 4729 case Intrinsic::arm_neon_vrshifts: 4730 case Intrinsic::arm_neon_vrshiftu: 4731 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 4732 break; 4733 return SDValue(); 4734 4735 case Intrinsic::arm_neon_vqshifts: 4736 case Intrinsic::arm_neon_vqshiftu: 4737 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 4738 break; 4739 return SDValue(); 4740 4741 case Intrinsic::arm_neon_vqshiftsu: 4742 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 4743 break; 4744 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 4745 4746 case Intrinsic::arm_neon_vshiftn: 4747 case Intrinsic::arm_neon_vrshiftn: 4748 case Intrinsic::arm_neon_vqshiftns: 4749 case Intrinsic::arm_neon_vqshiftnu: 4750 case Intrinsic::arm_neon_vqshiftnsu: 4751 case Intrinsic::arm_neon_vqrshiftns: 4752 case Intrinsic::arm_neon_vqrshiftnu: 4753 case Intrinsic::arm_neon_vqrshiftnsu: 4754 // Narrowing shifts require an immediate right shift. 4755 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 4756 break; 4757 llvm_unreachable("invalid shift count for narrowing vector shift " 4758 "intrinsic"); 4759 4760 default: 4761 llvm_unreachable("unhandled vector shift"); 4762 } 4763 4764 switch (IntNo) { 4765 case Intrinsic::arm_neon_vshifts: 4766 case Intrinsic::arm_neon_vshiftu: 4767 // Opcode already set above. 4768 break; 4769 case Intrinsic::arm_neon_vshiftls: 4770 case Intrinsic::arm_neon_vshiftlu: 4771 if (Cnt == VT.getVectorElementType().getSizeInBits()) 4772 VShiftOpc = ARMISD::VSHLLi; 4773 else 4774 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
4775 ARMISD::VSHLLs : ARMISD::VSHLLu); 4776 break; 4777 case Intrinsic::arm_neon_vshiftn: 4778 VShiftOpc = ARMISD::VSHRN; break; 4779 case Intrinsic::arm_neon_vrshifts: 4780 VShiftOpc = ARMISD::VRSHRs; break; 4781 case Intrinsic::arm_neon_vrshiftu: 4782 VShiftOpc = ARMISD::VRSHRu; break; 4783 case Intrinsic::arm_neon_vrshiftn: 4784 VShiftOpc = ARMISD::VRSHRN; break; 4785 case Intrinsic::arm_neon_vqshifts: 4786 VShiftOpc = ARMISD::VQSHLs; break; 4787 case Intrinsic::arm_neon_vqshiftu: 4788 VShiftOpc = ARMISD::VQSHLu; break; 4789 case Intrinsic::arm_neon_vqshiftsu: 4790 VShiftOpc = ARMISD::VQSHLsu; break; 4791 case Intrinsic::arm_neon_vqshiftns: 4792 VShiftOpc = ARMISD::VQSHRNs; break; 4793 case Intrinsic::arm_neon_vqshiftnu: 4794 VShiftOpc = ARMISD::VQSHRNu; break; 4795 case Intrinsic::arm_neon_vqshiftnsu: 4796 VShiftOpc = ARMISD::VQSHRNsu; break; 4797 case Intrinsic::arm_neon_vqrshiftns: 4798 VShiftOpc = ARMISD::VQRSHRNs; break; 4799 case Intrinsic::arm_neon_vqrshiftnu: 4800 VShiftOpc = ARMISD::VQRSHRNu; break; 4801 case Intrinsic::arm_neon_vqrshiftnsu: 4802 VShiftOpc = ARMISD::VQRSHRNsu; break; 4803 } 4804 4805 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 4806 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 4807 } 4808 4809 case Intrinsic::arm_neon_vshiftins: { 4810 EVT VT = N->getOperand(1).getValueType(); 4811 int64_t Cnt; 4812 unsigned VShiftOpc = 0; 4813 4814 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 4815 VShiftOpc = ARMISD::VSLI; 4816 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 4817 VShiftOpc = ARMISD::VSRI; 4818 else { 4819 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 4820 } 4821 4822 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 4823 N->getOperand(1), N->getOperand(2), 4824 DAG.getConstant(Cnt, MVT::i32)); 4825 } 4826 4827 case Intrinsic::arm_neon_vqrshifts: 4828 case Intrinsic::arm_neon_vqrshiftu: 4829 // No immediate versions of these to check for. 4830 break; 4831 } 4832 4833 return SDValue(); 4834} 4835 4836/// PerformShiftCombine - Checks for immediate versions of vector shifts and 4837/// lowers them. As with the vector shift intrinsics, this is done during DAG 4838/// combining instead of DAG legalizing because the build_vectors for 64-bit 4839/// vector element shift counts are generally not legal, and it is hard to see 4840/// their values after they get legalized to loads from a constant pool. 4841static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 4842 const ARMSubtarget *ST) { 4843 EVT VT = N->getValueType(0); 4844 4845 // Nothing to be done for scalar shifts. 4846 if (! VT.isVector()) 4847 return SDValue(); 4848 4849 assert(ST->hasNEON() && "unexpected vector shift"); 4850 int64_t Cnt; 4851 4852 switch (N->getOpcode()) { 4853 default: llvm_unreachable("unexpected shift opcode"); 4854 4855 case ISD::SHL: 4856 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 4857 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 4858 DAG.getConstant(Cnt, MVT::i32)); 4859 break; 4860 4861 case ISD::SRA: 4862 case ISD::SRL: 4863 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 4864 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
4865 ARMISD::VSHRs : ARMISD::VSHRu); 4866 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 4867 DAG.getConstant(Cnt, MVT::i32)); 4868 } 4869 } 4870 return SDValue(); 4871} 4872 4873/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 4874/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 4875static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 4876 const ARMSubtarget *ST) { 4877 SDValue N0 = N->getOperand(0); 4878 4879 // Check for sign- and zero-extensions of vector extract operations of 8- 4880 // and 16-bit vector elements. NEON supports these directly. They are 4881 // handled during DAG combining because type legalization will promote them 4882 // to 32-bit types and it is messy to recognize the operations after that. 4883 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 4884 SDValue Vec = N0.getOperand(0); 4885 SDValue Lane = N0.getOperand(1); 4886 EVT VT = N->getValueType(0); 4887 EVT EltVT = N0.getValueType(); 4888 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4889 4890 if (VT == MVT::i32 && 4891 (EltVT == MVT::i8 || EltVT == MVT::i16) && 4892 TLI.isTypeLegal(Vec.getValueType())) { 4893 4894 unsigned Opc = 0; 4895 switch (N->getOpcode()) { 4896 default: llvm_unreachable("unexpected opcode"); 4897 case ISD::SIGN_EXTEND: 4898 Opc = ARMISD::VGETLANEs; 4899 break; 4900 case ISD::ZERO_EXTEND: 4901 case ISD::ANY_EXTEND: 4902 Opc = ARMISD::VGETLANEu; 4903 break; 4904 } 4905 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 4906 } 4907 } 4908 4909 return SDValue(); 4910} 4911 4912/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 4913/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 4914static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 4915 const ARMSubtarget *ST) { 4916 // If the target supports NEON, try to use vmax/vmin instructions for f32 4917 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 4918 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 4919 // a NaN; only do the transformation when it matches that behavior. 4920 4921 // For now only do this when using NEON for FP operations; if using VFP, it 4922 // is not obvious that the benefit outweighs the cost of switching to the 4923 // NEON pipeline. 4924 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 4925 N->getValueType(0) != MVT::f32) 4926 return SDValue(); 4927 4928 SDValue CondLHS = N->getOperand(0); 4929 SDValue CondRHS = N->getOperand(1); 4930 SDValue LHS = N->getOperand(2); 4931 SDValue RHS = N->getOperand(3); 4932 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 4933 4934 unsigned Opcode = 0; 4935 bool IsReversed; 4936 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 4937 IsReversed = false; // x CC y ? x : y 4938 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 4939 IsReversed = true ; // x CC y ? y : x 4940 } else { 4941 return SDValue(); 4942 } 4943 4944 bool IsUnordered; 4945 switch (CC) { 4946 default: break; 4947 case ISD::SETOLT: 4948 case ISD::SETOLE: 4949 case ISD::SETLT: 4950 case ISD::SETLE: 4951 case ISD::SETULT: 4952 case ISD::SETULE: 4953 // If LHS is NaN, an ordered comparison will be false and the result will 4954 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 4955 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
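    // With an unordered compare a NaN LHS makes the select pick LHS, which already
    // matches the NaN-propagating vmin/vmax result, so only RHS needs the check.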
4956 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 4957 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 4958 break; 4959 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 4960 // will return -0, so vmin can only be used for unsafe math or if one of 4961 // the operands is known to be nonzero. 4962 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 4963 !UnsafeFPMath && 4964 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 4965 break; 4966 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 4967 break; 4968 4969 case ISD::SETOGT: 4970 case ISD::SETOGE: 4971 case ISD::SETGT: 4972 case ISD::SETGE: 4973 case ISD::SETUGT: 4974 case ISD::SETUGE: 4975 // If LHS is NaN, an ordered comparison will be false and the result will 4976 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 4977 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 4978 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 4979 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 4980 break; 4981 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 4982 // will return +0, so vmax can only be used for unsafe math or if one of 4983 // the operands is known to be nonzero. 4984 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 4985 !UnsafeFPMath && 4986 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 4987 break; 4988 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 4989 break; 4990 } 4991 4992 if (!Opcode) 4993 return SDValue(); 4994 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 4995} 4996 4997SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 4998 DAGCombinerInfo &DCI) const { 4999 switch (N->getOpcode()) { 5000 default: break; 5001 case ISD::ADD: return PerformADDCombine(N, DCI); 5002 case ISD::SUB: return PerformSUBCombine(N, DCI); 5003 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 5004 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 5005 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 5006 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 5007 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI.DAG); 5008 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 5009 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI.DAG); 5010 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 5011 case ISD::SHL: 5012 case ISD::SRA: 5013 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 5014 case ISD::SIGN_EXTEND: 5015 case ISD::ZERO_EXTEND: 5016 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 5017 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 5018 } 5019 return SDValue(); 5020} 5021 5022bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 5023 if (!Subtarget->allowsUnalignedMem()) 5024 return false; 5025 5026 switch (VT.getSimpleVT().SimpleTy) { 5027 default: 5028 return false; 5029 case MVT::i8: 5030 case MVT::i16: 5031 case MVT::i32: 5032 return true; 5033 // FIXME: VLD1 etc with standard alignment is legal. 
5034 } 5035} 5036 5037static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 5038 if (V < 0) 5039 return false; 5040 5041 unsigned Scale = 1; 5042 switch (VT.getSimpleVT().SimpleTy) { 5043 default: return false; 5044 case MVT::i1: 5045 case MVT::i8: 5046 // Scale == 1; 5047 break; 5048 case MVT::i16: 5049 // Scale == 2; 5050 Scale = 2; 5051 break; 5052 case MVT::i32: 5053 // Scale == 4; 5054 Scale = 4; 5055 break; 5056 } 5057 5058 if ((V & (Scale - 1)) != 0) 5059 return false; 5060 V /= Scale; 5061 return V == (V & ((1LL << 5) - 1)); 5062} 5063 5064static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 5065 const ARMSubtarget *Subtarget) { 5066 bool isNeg = false; 5067 if (V < 0) { 5068 isNeg = true; 5069 V = - V; 5070 } 5071 5072 switch (VT.getSimpleVT().SimpleTy) { 5073 default: return false; 5074 case MVT::i1: 5075 case MVT::i8: 5076 case MVT::i16: 5077 case MVT::i32: 5078 // + imm12 or - imm8 5079 if (isNeg) 5080 return V == (V & ((1LL << 8) - 1)); 5081 return V == (V & ((1LL << 12) - 1)); 5082 case MVT::f32: 5083 case MVT::f64: 5084 // Same as ARM mode. FIXME: NEON? 5085 if (!Subtarget->hasVFP2()) 5086 return false; 5087 if ((V & 3) != 0) 5088 return false; 5089 V >>= 2; 5090 return V == (V & ((1LL << 8) - 1)); 5091 } 5092} 5093 5094/// isLegalAddressImmediate - Return true if the integer value can be used 5095/// as the offset of the target addressing mode for load / store of the 5096/// given type. 5097static bool isLegalAddressImmediate(int64_t V, EVT VT, 5098 const ARMSubtarget *Subtarget) { 5099 if (V == 0) 5100 return true; 5101 5102 if (!VT.isSimple()) 5103 return false; 5104 5105 if (Subtarget->isThumb1Only()) 5106 return isLegalT1AddressImmediate(V, VT); 5107 else if (Subtarget->isThumb2()) 5108 return isLegalT2AddressImmediate(V, VT, Subtarget); 5109 5110 // ARM mode. 5111 if (V < 0) 5112 V = - V; 5113 switch (VT.getSimpleVT().SimpleTy) { 5114 default: return false; 5115 case MVT::i1: 5116 case MVT::i8: 5117 case MVT::i32: 5118 // +- imm12 5119 return V == (V & ((1LL << 12) - 1)); 5120 case MVT::i16: 5121 // +- imm8 5122 return V == (V & ((1LL << 8) - 1)); 5123 case MVT::f32: 5124 case MVT::f64: 5125 if (!Subtarget->hasVFP2()) // FIXME: NEON? 5126 return false; 5127 if ((V & 3) != 0) 5128 return false; 5129 V >>= 2; 5130 return V == (V & ((1LL << 8) - 1)); 5131 } 5132} 5133 5134bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 5135 EVT VT) const { 5136 int Scale = AM.Scale; 5137 if (Scale < 0) 5138 return false; 5139 5140 switch (VT.getSimpleVT().SimpleTy) { 5141 default: return false; 5142 case MVT::i1: 5143 case MVT::i8: 5144 case MVT::i16: 5145 case MVT::i32: 5146 if (Scale == 1) 5147 return true; 5148 // r + r << imm 5149 Scale = Scale & ~1; 5150 return Scale == 2 || Scale == 4 || Scale == 8; 5151 case MVT::i64: 5152 // r + r 5153 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5154 return true; 5155 return false; 5156 case MVT::isVoid: 5157 // Note, we allow "void" uses (basically, uses that aren't loads or 5158 // stores), because arm allows folding a scale into many arithmetic 5159 // operations. This should be made more precise and revisited later. 5160 5161 // Allow r << imm, but the imm has to be a multiple of two. 5162 if (Scale & 1) return false; 5163 return isPowerOf2_32(Scale); 5164 } 5165} 5166 5167/// isLegalAddressingMode - Return true if the addressing mode represented 5168/// by AM is legal for this target, for a load/store of the specified type. 
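/// The addressing mode has the form BaseGV + BaseOffs + BaseReg + Scale*ScaleReg,
/// where any of the components may be absent.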
5169bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5170 const Type *Ty) const { 5171 EVT VT = getValueType(Ty, true); 5172 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 5173 return false; 5174 5175 // Can never fold addr of global into load/store. 5176 if (AM.BaseGV) 5177 return false; 5178 5179 switch (AM.Scale) { 5180 case 0: // no scale reg, must be "r+i" or "r", or "i". 5181 break; 5182 case 1: 5183 if (Subtarget->isThumb1Only()) 5184 return false; 5185 // FALL THROUGH. 5186 default: 5187 // ARM doesn't support any R+R*scale+imm addr modes. 5188 if (AM.BaseOffs) 5189 return false; 5190 5191 if (!VT.isSimple()) 5192 return false; 5193 5194 if (Subtarget->isThumb2()) 5195 return isLegalT2ScaledAddressingMode(AM, VT); 5196 5197 int Scale = AM.Scale; 5198 switch (VT.getSimpleVT().SimpleTy) { 5199 default: return false; 5200 case MVT::i1: 5201 case MVT::i8: 5202 case MVT::i32: 5203 if (Scale < 0) Scale = -Scale; 5204 if (Scale == 1) 5205 return true; 5206 // r + r << imm 5207 return isPowerOf2_32(Scale & ~1); 5208 case MVT::i16: 5209 case MVT::i64: 5210 // r + r 5211 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5212 return true; 5213 return false; 5214 5215 case MVT::isVoid: 5216 // Note, we allow "void" uses (basically, uses that aren't loads or 5217 // stores), because arm allows folding a scale into many arithmetic 5218 // operations. This should be made more precise and revisited later. 5219 5220 // Allow r << imm, but the imm has to be a multiple of two. 5221 if (Scale & 1) return false; 5222 return isPowerOf2_32(Scale); 5223 } 5224 break; 5225 } 5226 return true; 5227} 5228 5229/// isLegalICmpImmediate - Return true if the specified immediate is legal 5230/// icmp immediate, that is the target has icmp instructions which can compare 5231/// a register against the immediate without having to materialize the 5232/// immediate into a register. 
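/// For ARM mode that is a rotated 8-bit immediate (so_imm), for Thumb2 a
/// modified immediate (t2_so_imm), and for Thumb1 a plain value in [0, 255].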
5233bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 5234 if (!Subtarget->isThumb()) 5235 return ARM_AM::getSOImmVal(Imm) != -1; 5236 if (Subtarget->isThumb2()) 5237 return ARM_AM::getT2SOImmVal(Imm) != -1; 5238 return Imm >= 0 && Imm <= 255; 5239} 5240 5241static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 5242 bool isSEXTLoad, SDValue &Base, 5243 SDValue &Offset, bool &isInc, 5244 SelectionDAG &DAG) { 5245 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5246 return false; 5247 5248 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 5249 // AddressingMode 3 5250 Base = Ptr->getOperand(0); 5251 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5252 int RHSC = (int)RHS->getZExtValue(); 5253 if (RHSC < 0 && RHSC > -256) { 5254 assert(Ptr->getOpcode() == ISD::ADD); 5255 isInc = false; 5256 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5257 return true; 5258 } 5259 } 5260 isInc = (Ptr->getOpcode() == ISD::ADD); 5261 Offset = Ptr->getOperand(1); 5262 return true; 5263 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 5264 // AddressingMode 2 5265 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5266 int RHSC = (int)RHS->getZExtValue(); 5267 if (RHSC < 0 && RHSC > -0x1000) { 5268 assert(Ptr->getOpcode() == ISD::ADD); 5269 isInc = false; 5270 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5271 Base = Ptr->getOperand(0); 5272 return true; 5273 } 5274 } 5275 5276 if (Ptr->getOpcode() == ISD::ADD) { 5277 isInc = true; 5278 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 5279 if (ShOpcVal != ARM_AM::no_shift) { 5280 Base = Ptr->getOperand(1); 5281 Offset = Ptr->getOperand(0); 5282 } else { 5283 Base = Ptr->getOperand(0); 5284 Offset = Ptr->getOperand(1); 5285 } 5286 return true; 5287 } 5288 5289 isInc = (Ptr->getOpcode() == ISD::ADD); 5290 Base = Ptr->getOperand(0); 5291 Offset = Ptr->getOperand(1); 5292 return true; 5293 } 5294 5295 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 5296 return false; 5297} 5298 5299static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 5300 bool isSEXTLoad, SDValue &Base, 5301 SDValue &Offset, bool &isInc, 5302 SelectionDAG &DAG) { 5303 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5304 return false; 5305 5306 Base = Ptr->getOperand(0); 5307 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5308 int RHSC = (int)RHS->getZExtValue(); 5309 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 5310 assert(Ptr->getOpcode() == ISD::ADD); 5311 isInc = false; 5312 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5313 return true; 5314 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 5315 isInc = Ptr->getOpcode() == ISD::ADD; 5316 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 5317 return true; 5318 } 5319 } 5320 5321 return false; 5322} 5323 5324/// getPreIndexedAddressParts - returns true by value, base pointer and 5325/// offset pointer and addressing mode by reference if the node's address 5326/// can be legally represented as pre-indexed load / store address. 
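/// For example, (load (add ptr, 4)) can become a pre-indexed load that yields
/// both the loaded value and the incremented pointer ptr+4.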
5327bool 5328ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 5329 SDValue &Offset, 5330 ISD::MemIndexedMode &AM, 5331 SelectionDAG &DAG) const { 5332 if (Subtarget->isThumb1Only()) 5333 return false; 5334 5335 EVT VT; 5336 SDValue Ptr; 5337 bool isSEXTLoad = false; 5338 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5339 Ptr = LD->getBasePtr(); 5340 VT = LD->getMemoryVT(); 5341 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5342 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5343 Ptr = ST->getBasePtr(); 5344 VT = ST->getMemoryVT(); 5345 } else 5346 return false; 5347 5348 bool isInc; 5349 bool isLegal = false; 5350 if (Subtarget->isThumb2()) 5351 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5352 Offset, isInc, DAG); 5353 else 5354 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5355 Offset, isInc, DAG); 5356 if (!isLegal) 5357 return false; 5358 5359 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 5360 return true; 5361} 5362 5363/// getPostIndexedAddressParts - returns true by value, base pointer and 5364/// offset pointer and addressing mode by reference if this node can be 5365/// combined with a load / store to form a post-indexed load / store. 5366bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 5367 SDValue &Base, 5368 SDValue &Offset, 5369 ISD::MemIndexedMode &AM, 5370 SelectionDAG &DAG) const { 5371 if (Subtarget->isThumb1Only()) 5372 return false; 5373 5374 EVT VT; 5375 SDValue Ptr; 5376 bool isSEXTLoad = false; 5377 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5378 VT = LD->getMemoryVT(); 5379 Ptr = LD->getBasePtr(); 5380 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5381 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5382 VT = ST->getMemoryVT(); 5383 Ptr = ST->getBasePtr(); 5384 } else 5385 return false; 5386 5387 bool isInc; 5388 bool isLegal = false; 5389 if (Subtarget->isThumb2()) 5390 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5391 isInc, DAG); 5392 else 5393 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5394 isInc, DAG); 5395 if (!isLegal) 5396 return false; 5397 5398 if (Ptr != Base) { 5399 // Swap base ptr and offset to catch more post-index load / store when 5400 // it's legal. In Thumb2 mode, offset must be an immediate. 5401 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 5402 !Subtarget->isThumb2()) 5403 std::swap(Base, Offset); 5404 5405 // Post-indexed load / store update the base pointer. 5406 if (Ptr != Base) 5407 return false; 5408 } 5409 5410 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 5411 return true; 5412} 5413 5414void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5415 const APInt &Mask, 5416 APInt &KnownZero, 5417 APInt &KnownOne, 5418 const SelectionDAG &DAG, 5419 unsigned Depth) const { 5420 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5421 switch (Op.getOpcode()) { 5422 default: break; 5423 case ARMISD::CMOV: { 5424 // Bits are known zero/one if known on the LHS and RHS. 
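    // A CMOV yields one of its first two operands, so a bit is known here only
    // if it is known, with the same value, in both of them.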
5425 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 5426 if (KnownZero == 0 && KnownOne == 0) return; 5427 5428 APInt KnownZeroRHS, KnownOneRHS; 5429 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 5430 KnownZeroRHS, KnownOneRHS, Depth+1); 5431 KnownZero &= KnownZeroRHS; 5432 KnownOne &= KnownOneRHS; 5433 return; 5434 } 5435 } 5436} 5437 5438//===----------------------------------------------------------------------===// 5439// ARM Inline Assembly Support 5440//===----------------------------------------------------------------------===// 5441 5442/// getConstraintType - Given a constraint letter, return the type of 5443/// constraint it is for this target. 5444ARMTargetLowering::ConstraintType 5445ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 5446 if (Constraint.size() == 1) { 5447 switch (Constraint[0]) { 5448 default: break; 5449 case 'l': return C_RegisterClass; 5450 case 'w': return C_RegisterClass; 5451 } 5452 } 5453 return TargetLowering::getConstraintType(Constraint); 5454} 5455 5456/// Examine constraint type and operand type and determine a weight value. 5457/// This object must already have been set up with the operand type 5458/// and the current alternative constraint selected. 5459TargetLowering::ConstraintWeight 5460ARMTargetLowering::getSingleConstraintMatchWeight( 5461 AsmOperandInfo &info, const char *constraint) const { 5462 ConstraintWeight weight = CW_Invalid; 5463 Value *CallOperandVal = info.CallOperandVal; 5464 // If we don't have a value, we can't do a match, 5465 // but allow it at the lowest weight. 5466 if (CallOperandVal == NULL) 5467 return CW_Default; 5468 const Type *type = CallOperandVal->getType(); 5469 // Look at the constraint type. 5470 switch (*constraint) { 5471 default: 5472 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 5473 break; 5474 case 'l': 5475 if (type->isIntegerTy()) { 5476 if (Subtarget->isThumb()) 5477 weight = CW_SpecificReg; 5478 else 5479 weight = CW_Register; 5480 } 5481 break; 5482 case 'w': 5483 if (type->isFloatingPointTy()) 5484 weight = CW_Register; 5485 break; 5486 } 5487 return weight; 5488} 5489 5490std::pair<unsigned, const TargetRegisterClass*> 5491ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5492 EVT VT) const { 5493 if (Constraint.size() == 1) { 5494 // GCC ARM Constraint Letters 5495 switch (Constraint[0]) { 5496 case 'l': 5497 if (Subtarget->isThumb()) 5498 return std::make_pair(0U, ARM::tGPRRegisterClass); 5499 else 5500 return std::make_pair(0U, ARM::GPRRegisterClass); 5501 case 'r': 5502 return std::make_pair(0U, ARM::GPRRegisterClass); 5503 case 'w': 5504 if (VT == MVT::f32) 5505 return std::make_pair(0U, ARM::SPRRegisterClass); 5506 if (VT.getSizeInBits() == 64) 5507 return std::make_pair(0U, ARM::DPRRegisterClass); 5508 if (VT.getSizeInBits() == 128) 5509 return std::make_pair(0U, ARM::QPRRegisterClass); 5510 break; 5511 } 5512 } 5513 if (StringRef("{cc}").equals_lower(Constraint)) 5514 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 5515 5516 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5517} 5518 5519std::vector<unsigned> ARMTargetLowering:: 5520getRegClassForInlineAsmConstraint(const std::string &Constraint, 5521 EVT VT) const { 5522 if (Constraint.size() != 1) 5523 return std::vector<unsigned>(); 5524 5525 switch (Constraint[0]) { // GCC ARM Constraint Letters 5526 default: break; 5527 case 'l': 5528 return make_vector<unsigned>(ARM::R0, ARM::R1, 
ARM::R2, ARM::R3, 5529 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5530 0); 5531 case 'r': 5532 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 5533 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5534 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 5535 ARM::R12, ARM::LR, 0); 5536 case 'w': 5537 if (VT == MVT::f32) 5538 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 5539 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 5540 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 5541 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 5542 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 5543 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 5544 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 5545 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 5546 if (VT.getSizeInBits() == 64) 5547 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 5548 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 5549 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 5550 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 5551 if (VT.getSizeInBits() == 128) 5552 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 5553 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 5554 break; 5555 } 5556 5557 return std::vector<unsigned>(); 5558} 5559 5560/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5561/// vector. If it is invalid, don't add anything to Ops. 5562void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5563 char Constraint, 5564 std::vector<SDValue>&Ops, 5565 SelectionDAG &DAG) const { 5566 SDValue Result(0, 0); 5567 5568 switch (Constraint) { 5569 default: break; 5570 case 'I': case 'J': case 'K': case 'L': 5571 case 'M': case 'N': case 'O': 5572 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 5573 if (!C) 5574 return; 5575 5576 int64_t CVal64 = C->getSExtValue(); 5577 int CVal = (int) CVal64; 5578 // None of these constraints allow values larger than 32 bits. Check 5579 // that the value fits in an int. 5580 if (CVal != CVal64) 5581 return; 5582 5583 switch (Constraint) { 5584 case 'I': 5585 if (Subtarget->isThumb1Only()) { 5586 // This must be a constant between 0 and 255, for ADD 5587 // immediates. 5588 if (CVal >= 0 && CVal <= 255) 5589 break; 5590 } else if (Subtarget->isThumb2()) { 5591 // A constant that can be used as an immediate value in a 5592 // data-processing instruction. 5593 if (ARM_AM::getT2SOImmVal(CVal) != -1) 5594 break; 5595 } else { 5596 // A constant that can be used as an immediate value in a 5597 // data-processing instruction. 5598 if (ARM_AM::getSOImmVal(CVal) != -1) 5599 break; 5600 } 5601 return; 5602 5603 case 'J': 5604 if (Subtarget->isThumb()) { // FIXME thumb2 5605 // This must be a constant between -255 and -1, for negated ADD 5606 // immediates. This can be used in GCC with an "n" modifier that 5607 // prints the negated value, for use with SUB instructions. It is 5608 // not useful otherwise but is implemented for compatibility. 5609 if (CVal >= -255 && CVal <= -1) 5610 break; 5611 } else { 5612 // This must be a constant between -4095 and 4095. It is not clear 5613 // what this constraint is intended for. Implemented for 5614 // compatibility with GCC. 5615 if (CVal >= -4095 && CVal <= 4095) 5616 break; 5617 } 5618 return; 5619 5620 case 'K': 5621 if (Subtarget->isThumb1Only()) { 5622 // A 32-bit value where only one byte has a nonzero value. Exclude 5623 // zero to match GCC. This constraint is used by GCC internally for 5624 // constants that can be loaded with a move/shift combination. 5625 // It is not useful otherwise but is implemented for compatibility. 
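        // For example, 0x00ff0000 qualifies: it is the 8-bit value 0xff shifted left by 16.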
5626 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 5627 break; 5628 } else if (Subtarget->isThumb2()) { 5629 // A constant whose bitwise inverse can be used as an immediate 5630 // value in a data-processing instruction. This can be used in GCC 5631 // with a "B" modifier that prints the inverted value, for use with 5632 // BIC and MVN instructions. It is not useful otherwise but is 5633 // implemented for compatibility. 5634 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 5635 break; 5636 } else { 5637 // A constant whose bitwise inverse can be used as an immediate 5638 // value in a data-processing instruction. This can be used in GCC 5639 // with a "B" modifier that prints the inverted value, for use with 5640 // BIC and MVN instructions. It is not useful otherwise but is 5641 // implemented for compatibility. 5642 if (ARM_AM::getSOImmVal(~CVal) != -1) 5643 break; 5644 } 5645 return; 5646 5647 case 'L': 5648 if (Subtarget->isThumb1Only()) { 5649 // This must be a constant between -7 and 7, 5650 // for 3-operand ADD/SUB immediate instructions. 5651 if (CVal >= -7 && CVal < 7) 5652 break; 5653 } else if (Subtarget->isThumb2()) { 5654 // A constant whose negation can be used as an immediate value in a 5655 // data-processing instruction. This can be used in GCC with an "n" 5656 // modifier that prints the negated value, for use with SUB 5657 // instructions. It is not useful otherwise but is implemented for 5658 // compatibility. 5659 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 5660 break; 5661 } else { 5662 // A constant whose negation can be used as an immediate value in a 5663 // data-processing instruction. This can be used in GCC with an "n" 5664 // modifier that prints the negated value, for use with SUB 5665 // instructions. It is not useful otherwise but is implemented for 5666 // compatibility. 5667 if (ARM_AM::getSOImmVal(-CVal) != -1) 5668 break; 5669 } 5670 return; 5671 5672 case 'M': 5673 if (Subtarget->isThumb()) { // FIXME thumb2 5674 // This must be a multiple of 4 between 0 and 1020, for 5675 // ADD sp + immediate. 5676 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 5677 break; 5678 } else { 5679 // A power of two or a constant between 0 and 32. This is used in 5680 // GCC for the shift amount on shifted register operands, but it is 5681 // useful in general for any shift amounts. 5682 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 5683 break; 5684 } 5685 return; 5686 5687 case 'N': 5688 if (Subtarget->isThumb()) { // FIXME thumb2 5689 // This must be a constant between 0 and 31, for shift amounts. 5690 if (CVal >= 0 && CVal <= 31) 5691 break; 5692 } 5693 return; 5694 5695 case 'O': 5696 if (Subtarget->isThumb()) { // FIXME thumb2 5697 // This must be a multiple of 4 between -508 and 508, for 5698 // ADD/SUB sp = sp + immediate. 5699 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 5700 break; 5701 } 5702 return; 5703 } 5704 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 5705 break; 5706 } 5707 5708 if (Result.getNode()) { 5709 Ops.push_back(Result); 5710 return; 5711 } 5712 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 5713} 5714 5715bool 5716ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 5717 // The ARM target isn't yet aware of offsets. 
5718 return false; 5719} 5720 5721int ARM::getVFPf32Imm(const APFloat &FPImm) { 5722 APInt Imm = FPImm.bitcastToAPInt(); 5723 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; 5724 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 5725 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits 5726 5727 // We can handle 4 bits of mantissa. 5728 // mantissa = (16+UInt(e:f:g:h))/16. 5729 if (Mantissa & 0x7ffff) 5730 return -1; 5731 Mantissa >>= 19; 5732 if ((Mantissa & 0xf) != Mantissa) 5733 return -1; 5734 5735 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 5736 if (Exp < -3 || Exp > 4) 5737 return -1; 5738 Exp = ((Exp+3) & 0x7) ^ 4; 5739 5740 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 5741} 5742 5743int ARM::getVFPf64Imm(const APFloat &FPImm) { 5744 APInt Imm = FPImm.bitcastToAPInt(); 5745 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; 5746 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 5747 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL; 5748 5749 // We can handle 4 bits of mantissa. 5750 // mantissa = (16+UInt(e:f:g:h))/16. 5751 if (Mantissa & 0xffffffffffffLL) 5752 return -1; 5753 Mantissa >>= 48; 5754 if ((Mantissa & 0xf) != Mantissa) 5755 return -1; 5756 5757 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 5758 if (Exp < -3 || Exp > 4) 5759 return -1; 5760 Exp = ((Exp+3) & 0x7) ^ 4; 5761 5762 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 5763} 5764 5765bool ARM::isBitFieldInvertedMask(unsigned v) { 5766 if (v == 0xffffffff) 5767 return 0; 5768 // there can be 1's on either or both "outsides", all the "inside" 5769 // bits must be 0's 5770 unsigned int lsb = 0, msb = 31; 5771 while (v & (1 << msb)) --msb; 5772 while (v & (1 << lsb)) ++lsb; 5773 for (unsigned int i = lsb; i <= msb; ++i) { 5774 if (v & (1 << i)) 5775 return 0; 5776 } 5777 return 1; 5778} 5779 5780/// isFPImmLegal - Returns true if the target can instruction select the 5781/// specified FP immediate natively. If false, the legalizer will 5782/// materialize the FP immediate as a load from a constant pool. 5783bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 5784 if (!Subtarget->hasVFP3()) 5785 return false; 5786 if (VT == MVT::f32) 5787 return ARM::getVFPf32Imm(Imm) != -1; 5788 if (VT == MVT::f64) 5789 return ARM::getVFPf64Imm(Imm) != -1; 5790 return false; 5791} 5792 5793/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 5794/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 5795/// specified in the intrinsic calls. 5796bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 5797 const CallInst &I, 5798 unsigned Intrinsic) const { 5799 switch (Intrinsic) { 5800 case Intrinsic::arm_neon_vld1: 5801 case Intrinsic::arm_neon_vld2: 5802 case Intrinsic::arm_neon_vld3: 5803 case Intrinsic::arm_neon_vld4: 5804 case Intrinsic::arm_neon_vld2lane: 5805 case Intrinsic::arm_neon_vld3lane: 5806 case Intrinsic::arm_neon_vld4lane: { 5807 Info.opc = ISD::INTRINSIC_W_CHAIN; 5808 // Conservatively set memVT to the entire set of vectors loaded. 
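    // For example, a vld3 of <8 x i8> loads three d registers (24 bytes), so the
    // memVT below works out to v3i64.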
5809 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 5810 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 5811 Info.ptrVal = I.getArgOperand(0); 5812 Info.offset = 0; 5813 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 5814 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 5815 Info.vol = false; // volatile loads with NEON intrinsics not supported 5816 Info.readMem = true; 5817 Info.writeMem = false; 5818 return true; 5819 } 5820 case Intrinsic::arm_neon_vst1: 5821 case Intrinsic::arm_neon_vst2: 5822 case Intrinsic::arm_neon_vst3: 5823 case Intrinsic::arm_neon_vst4: 5824 case Intrinsic::arm_neon_vst2lane: 5825 case Intrinsic::arm_neon_vst3lane: 5826 case Intrinsic::arm_neon_vst4lane: { 5827 Info.opc = ISD::INTRINSIC_VOID; 5828 // Conservatively set memVT to the entire set of vectors stored. 5829 unsigned NumElts = 0; 5830 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 5831 const Type *ArgTy = I.getArgOperand(ArgI)->getType(); 5832 if (!ArgTy->isVectorTy()) 5833 break; 5834 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 5835 } 5836 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 5837 Info.ptrVal = I.getArgOperand(0); 5838 Info.offset = 0; 5839 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 5840 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 5841 Info.vol = false; // volatile stores with NEON intrinsics not supported 5842 Info.readMem = false; 5843 Info.writeMem = true; 5844 return true; 5845 } 5846 default: 5847 break; 5848 } 5849 5850 return false; 5851} 5852