ARMISelLowering.cpp revision c24cb3551ed66830b53362f593269873cb53a0c4
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
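
// Note: DEBUG_TYPE above is what tags this file's DEBUG() output for
// "llc -debug-only=arm-isel", and STATISTIC counters such as NumTailCalls
// are printed by "llc -stats".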

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }
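
  // In effect, "Promote" plus AddPromotedToType rewrites a node on the
  // promoted type: e.g. a v8i8 AND is bitcast to v2i32, performed there, and
  // bitcast back, so only one integer vector type per register size needs
  // instruction patterns for the bitwise ops.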

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
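
      // The condition registered with setCmpLibcallCC is how the helper's
      // i32 result is tested to recover the predicate: these *vfp helpers
      // return nonzero when the predicate holds, so most comparisons test
      // the result with SETNE against zero, while O_F32/O_F64 reuse the
      // "unordered" helper and invert it with SETEQ.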

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
"__aeabi_fdiv"); 283 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 284 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 285 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 286 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 287 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 288 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 289 290 // Single-precision floating-point comparison helper functions 291 // RTABI chapter 4.1.2, Table 5 292 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 293 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 294 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 295 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 296 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 297 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 298 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 299 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 300 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 301 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 302 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 303 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 304 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 305 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 306 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 307 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 308 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 309 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 310 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 311 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 313 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 314 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 315 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 316 317 // Floating-point to integer conversions. 318 // RTABI chapter 4.1.2, Table 6 319 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 320 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 321 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 322 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 323 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 324 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 325 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 326 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 327 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 328 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 329 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 330 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 331 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 332 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 333 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 334 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 335 336 // Conversions between floating types. 337 // RTABI chapter 4.1.2, Table 7 338 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 339 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 340 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 342 343 // Integer to floating-point conversions. 

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }
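
  // Marking the indexed modes Legal lets the DAG combiner fold an address
  // update into the memory access itself, e.g. a post-indexed
  // "ldr r0, [r1], #4" loads and advances the base register in one
  // instruction.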

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  maxStoresPerMemcpy = 1; // temporary - rewrite interface to use type
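
  // maxStoresPerMemcpy bounds how many scalar stores a small memcpy may be
  // expanded into inline before lowering falls back to a library call;
  // 1 keeps inline expansion minimal here.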

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP";

  case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";
return "ARMISD::VTST"; 786 787 case ARMISD::VSHL: return "ARMISD::VSHL"; 788 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 789 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 790 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 791 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 792 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 793 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 794 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 795 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 796 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 797 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 798 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 799 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 800 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 801 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 802 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 803 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 804 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 805 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 806 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 807 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 808 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 809 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 810 case ARMISD::VDUP: return "ARMISD::VDUP"; 811 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 812 case ARMISD::VEXT: return "ARMISD::VEXT"; 813 case ARMISD::VREV64: return "ARMISD::VREV64"; 814 case ARMISD::VREV32: return "ARMISD::VREV32"; 815 case ARMISD::VREV16: return "ARMISD::VREV16"; 816 case ARMISD::VZIP: return "ARMISD::VZIP"; 817 case ARMISD::VUZP: return "ARMISD::VUZP"; 818 case ARMISD::VTRN: return "ARMISD::VTRN"; 819 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 820 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 821 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 822 case ARMISD::FMAX: return "ARMISD::FMAX"; 823 case ARMISD::FMIN: return "ARMISD::FMIN"; 824 case ARMISD::BFI: return "ARMISD::BFI"; 825 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 826 } 827} 828 829/// getRegClassFor - Return the register class that should be used for the 830/// specified value type. 831TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 832 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 833 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 834 // load / store 4 to 8 consecutive D registers. 835 if (Subtarget->hasNEON()) { 836 if (VT == MVT::v4i64) 837 return ARM::QQPRRegisterClass; 838 else if (VT == MVT::v8i64) 839 return ARM::QQQQPRRegisterClass; 840 } 841 return TargetLowering::getRegClassFor(VT); 842} 843 844// Create a fast isel object. 845FastISel * 846ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 847 return ARM::createFastISel(funcInfo); 848} 849 850/// getFunctionAlignment - Return the Log2 alignment of this function. 851unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const { 852 return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2; 853} 854 855/// getMaximalGlobalOffset - Returns the maximal possible offset which can 856/// be used for loads / stores from the global. 857unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 858 return (Subtarget->isThumb1Only() ? 

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return RegInfo->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = RegInfo->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
//                               Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
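
// The unsigned cases above follow the NZCV flags a CMP (subtract) sets:
// e.g. SETULT becomes ARMCC::LO (carry clear), the same condition a "blo"
// tests after a "cmp".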

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));
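
  // Under a soft-float convention an f64 result comes back split across two
  // core registers (and a v2f64 across four), so a single returned value may
  // consume two or four CCValAssigns below and is reassembled with
  // ARMISD::VMOVDRR.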

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}
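
// The memcpy node built above is not marked always-inline, so later lowering
// may either expand it to loads/stores or emit an actual call to memcpy,
// depending on the size and the target's store limits.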

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;
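
  // A sibcall reuses the caller's incoming argument area, so no new stack
  // space is reserved and the CALLSEQ_START below is skipped.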

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
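
  // The InFlag glue threaded through the copies above keeps the scheduler
  // from separating them from the call that consumes the registers.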

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                       Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
1337 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1338 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1339 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1340 ARMPCLabelIndex, 1341 ARMCP::CPValue, 4); 1342 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1343 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1344 Callee = DAG.getLoad(getPointerTy(), dl, 1345 DAG.getEntryNode(), CPAddr, 1346 MachinePointerInfo::getConstantPool(), 1347 false, false, 0); 1348 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1349 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1350 getPointerTy(), Callee, PICLabel); 1351 } else { 1352 // On ELF targets for PIC code, direct calls should go through the PLT 1353 unsigned OpFlags = 0; 1354 if (Subtarget->isTargetELF() && 1355 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1356 OpFlags = ARMII::MO_PLT; 1357 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1358 } 1359 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1360 isDirect = true; 1361 bool isStub = Subtarget->isTargetDarwin() && 1362 getTargetMachine().getRelocationModel() != Reloc::Static; 1363 isARMFunc = !Subtarget->isThumb() || isStub; 1364 // tBX takes a register source operand. 1365 const char *Sym = S->getSymbol(); 1366 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1367 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1368 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1369 Sym, ARMPCLabelIndex, 4); 1370 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1371 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1372 Callee = DAG.getLoad(getPointerTy(), dl, 1373 DAG.getEntryNode(), CPAddr, 1374 MachinePointerInfo::getConstantPool(), 1375 false, false, 0); 1376 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1377 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1378 getPointerTy(), Callee, PICLabel); 1379 } else { 1380 unsigned OpFlags = 0; 1381 // On ELF targets for PIC code, direct calls should go through the PLT 1382 if (Subtarget->isTargetELF() && 1383 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1384 OpFlags = ARMII::MO_PLT; 1385 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1386 } 1387 } 1388 1389 // FIXME: handle tail calls differently. 1390 unsigned CallOpc; 1391 if (Subtarget->isThumb()) { 1392 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1393 CallOpc = ARMISD::CALL_NOLINK; 1394 else 1395 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1396 } else { 1397 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1398 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1399 : ARMISD::CALL_NOLINK; 1400 } 1401 1402 std::vector<SDValue> Ops; 1403 Ops.push_back(Chain); 1404 Ops.push_back(Callee); 1405 1406 // Add argument registers to the end of the list so that they are known live 1407 // into the call. 1408 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1409 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1410 RegsToPass[i].second.getValueType())); 1411 1412 if (InFlag.getNode()) 1413 Ops.push_back(InFlag); 1414 1415 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1416 if (isTailCall) 1417 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1418 1419 // Returns a chain and a flag for retval copy to use. 
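  // Sketch of the call node built below for a two-argument call (operand
  // order per the Ops vector above):
  //   t: ch,glue = ARMISD::CALL chain, callee, Reg:R0, Reg:R1, glue
  // The trailing register operands mark the argument registers as live into
  // the call; the glue keeps the preceding CopyToReg nodes attached to it.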
1420 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1421 InFlag = Chain.getValue(1); 1422 1423 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1424 DAG.getIntPtrConstant(0, true), InFlag); 1425 if (!Ins.empty()) 1426 InFlag = Chain.getValue(1); 1427 1428 // Handle result values, copying them out of physregs into vregs that we 1429 // return. 1430 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1431 dl, DAG, InVals); 1432} 1433 1434/// MatchingStackOffset - Return true if the given stack call argument is 1435/// already available in the same position (relatively) of the caller's 1436/// incoming argument stack. 1437static 1438bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1439 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1440 const ARMInstrInfo *TII) { 1441 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1442 int FI = INT_MAX; 1443 if (Arg.getOpcode() == ISD::CopyFromReg) { 1444 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1445 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 1446 return false; 1447 MachineInstr *Def = MRI->getVRegDef(VR); 1448 if (!Def) 1449 return false; 1450 if (!Flags.isByVal()) { 1451 if (!TII->isLoadFromStackSlot(Def, FI)) 1452 return false; 1453 } else { 1454 return false; 1455 } 1456 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1457 if (Flags.isByVal()) 1458 // ByVal argument is passed in as a pointer but it's now being 1459 // dereferenced. e.g. 1460 // define @foo(%struct.X* %A) { 1461 // tail call @bar(%struct.X* byval %A) 1462 // } 1463 return false; 1464 SDValue Ptr = Ld->getBasePtr(); 1465 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1466 if (!FINode) 1467 return false; 1468 FI = FINode->getIndex(); 1469 } else 1470 return false; 1471 1472 assert(FI != INT_MAX); 1473 if (!MFI->isFixedObjectIndex(FI)) 1474 return false; 1475 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1476} 1477 1478/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1479/// for tail call optimization. Targets which want to do tail call 1480/// optimization should implement this function. 1481bool 1482ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1483 CallingConv::ID CalleeCC, 1484 bool isVarArg, 1485 bool isCalleeStructRet, 1486 bool isCallerStructRet, 1487 const SmallVectorImpl<ISD::OutputArg> &Outs, 1488 const SmallVectorImpl<SDValue> &OutVals, 1489 const SmallVectorImpl<ISD::InputArg> &Ins, 1490 SelectionDAG& DAG) const { 1491 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1492 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1493 bool CCMatch = CallerCC == CalleeCC; 1494 1495 // Look for obvious safe cases to perform tail call optimization that do not 1496 // require ABI changes. This is what gcc calls sibcall. 1497 1498 // Do not sibcall optimize vararg calls unless the call site is not passing 1499 // any arguments. 1500 if (isVarArg && !Outs.empty()) 1501 return false; 1502 1503 // Also avoid sibcall optimization if either caller or callee uses struct 1504 // return semantics. 1505 if (isCalleeStructRet || isCallerStructRet) 1506 return false; 1507 1508 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1509 // emitEpilogue is not ready for them. 1510 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1511 // LR. 
This means if we need to reload LR, it takes an extra instruction, 1512 // which outweighs the value of the tail call; but here we don't know yet 1513 // whether LR is going to be used. Probably the right approach is to 1514 // generate the tail call here and turn it back into CALL/RET in 1515 // emitEpilogue if LR is used. 1516 if (Subtarget->isThumb1Only()) 1517 return false; 1518 1519 // For the moment, we can only do this to functions defined in this 1520 // compilation, or to indirect calls. A Thumb B to an ARM function, 1521 // or vice versa, is not easily fixed up in the linker unlike BL. 1522 // (We could do this by loading the address of the callee into a register; 1523 // that is an extra instruction over the direct call and burns a register 1524 // as well, so is not likely to be a win.) 1525 1526 // It might be safe to remove this restriction on non-Darwin. 1527 1528 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1529 // but we need to make sure there are enough registers; the only valid 1530 // registers are the 4 used for parameters. We don't currently do this 1531 // case. 1532 if (isa<ExternalSymbolSDNode>(Callee)) 1533 return false; 1534 1535 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1536 const GlobalValue *GV = G->getGlobal(); 1537 if (GV->isDeclaration() || GV->isWeakForLinker()) 1538 return false; 1539 } 1540 1541 // If the calling conventions do not match, then we'd better make sure the 1542 // results are returned in the same way as what the caller expects. 1543 if (!CCMatch) { 1544 SmallVector<CCValAssign, 16> RVLocs1; 1545 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 1546 RVLocs1, *DAG.getContext()); 1547 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1548 1549 SmallVector<CCValAssign, 16> RVLocs2; 1550 CCState CCInfo2(CallerCC, false, getTargetMachine(), 1551 RVLocs2, *DAG.getContext()); 1552 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1553 1554 if (RVLocs1.size() != RVLocs2.size()) 1555 return false; 1556 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1557 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1558 return false; 1559 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1560 return false; 1561 if (RVLocs1[i].isRegLoc()) { 1562 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1563 return false; 1564 } else { 1565 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1566 return false; 1567 } 1568 } 1569 } 1570 1571 // If the callee takes no arguments then go on to check the results of the 1572 // call. 1573 if (!Outs.empty()) { 1574 // Check if stack adjustment is needed. For now, do not do this if any 1575 // argument is passed on the stack. 1576 SmallVector<CCValAssign, 16> ArgLocs; 1577 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 1578 ArgLocs, *DAG.getContext()); 1579 CCInfo.AnalyzeCallOperands(Outs, 1580 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1581 if (CCInfo.getNextStackOffset()) { 1582 MachineFunction &MF = DAG.getMachineFunction(); 1583 1584 // Check if the arguments are already laid out in the right way as 1585 // the caller's fixed stack objects.
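  // For example: a caller that received an i32 at [sp, #0] and forwards it
  // unchanged as the callee's corresponding stack argument needs no stack
  // adjustment; the MatchingStackOffset check below should accept that
  // argument because it already lives in the right fixed stack object.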
1586 MachineFrameInfo *MFI = MF.getFrameInfo(); 1587 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1588 const ARMInstrInfo *TII = 1589 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1590 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1591 i != e; 1592 ++i, ++realArgIdx) { 1593 CCValAssign &VA = ArgLocs[i]; 1594 EVT RegVT = VA.getLocVT(); 1595 SDValue Arg = OutVals[realArgIdx]; 1596 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1597 if (VA.getLocInfo() == CCValAssign::Indirect) 1598 return false; 1599 if (VA.needsCustom()) { 1600 // f64 and vector types are split into multiple registers or 1601 // register/stack-slot combinations. The types will not match 1602 // the registers; give up on memory f64 refs until we figure 1603 // out what to do about this. 1604 if (!VA.isRegLoc()) 1605 return false; 1606 if (!ArgLocs[++i].isRegLoc()) 1607 return false; 1608 if (RegVT == MVT::v2f64) { 1609 if (!ArgLocs[++i].isRegLoc()) 1610 return false; 1611 if (!ArgLocs[++i].isRegLoc()) 1612 return false; 1613 } 1614 } else if (!VA.isRegLoc()) { 1615 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1616 MFI, MRI, TII)) 1617 return false; 1618 } 1619 } 1620 } 1621 } 1622 1623 return true; 1624} 1625 1626SDValue 1627ARMTargetLowering::LowerReturn(SDValue Chain, 1628 CallingConv::ID CallConv, bool isVarArg, 1629 const SmallVectorImpl<ISD::OutputArg> &Outs, 1630 const SmallVectorImpl<SDValue> &OutVals, 1631 DebugLoc dl, SelectionDAG &DAG) const { 1632 1633 // CCValAssign - represent the assignment of the return value to a location. 1634 SmallVector<CCValAssign, 16> RVLocs; 1635 1636 // CCState - Info about the registers and stack slots. 1637 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1638 *DAG.getContext()); 1639 1640 // Analyze outgoing return values. 1641 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1642 isVarArg)); 1643 1644 // If this is the first return lowered for this function, add 1645 // the regs to the liveout set for the function. 1646 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1647 for (unsigned i = 0; i != RVLocs.size(); ++i) 1648 if (RVLocs[i].isRegLoc()) 1649 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1650 } 1651 1652 SDValue Flag; 1653 1654 // Copy the result values into the output registers. 1655 for (unsigned i = 0, realRVLocIdx = 0; 1656 i != RVLocs.size(); 1657 ++i, ++realRVLocIdx) { 1658 CCValAssign &VA = RVLocs[i]; 1659 assert(VA.isRegLoc() && "Can only return in registers!"); 1660 1661 SDValue Arg = OutVals[realRVLocIdx]; 1662 1663 switch (VA.getLocInfo()) { 1664 default: llvm_unreachable("Unknown loc info!"); 1665 case CCValAssign::Full: break; 1666 case CCValAssign::BCvt: 1667 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg); 1668 break; 1669 } 1670 1671 if (VA.needsCustom()) { 1672 if (VA.getLocVT() == MVT::v2f64) { 1673 // Extract the first half and return it in two registers. 
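  // Sketch of the v2f64 return lowering done here when the value comes back
  // in core registers: each f64 lane is extracted and moved out with
  // VMOVRRD, e.g. lane 0 to {r0,r1} and lane 1 to {r2,r3}.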
1674 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1675 DAG.getConstant(0, MVT::i32)); 1676 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1677 DAG.getVTList(MVT::i32, MVT::i32), Half); 1678 1679 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1680 Flag = Chain.getValue(1); 1681 VA = RVLocs[++i]; // skip ahead to next loc 1682 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1683 HalfGPRs.getValue(1), Flag); 1684 Flag = Chain.getValue(1); 1685 VA = RVLocs[++i]; // skip ahead to next loc 1686 1687 // Extract the 2nd half and fall through to handle it as an f64 value. 1688 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1689 DAG.getConstant(1, MVT::i32)); 1690 } 1691 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1692 // available. 1693 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1694 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1695 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1696 Flag = Chain.getValue(1); 1697 VA = RVLocs[++i]; // skip ahead to next loc 1698 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1699 Flag); 1700 } else 1701 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1702 1703 // Guarantee that all emitted copies are 1704 // glued together so that nothing can be scheduled between them. 1705 Flag = Chain.getValue(1); 1706 } 1707 1708 SDValue result; 1709 if (Flag.getNode()) 1710 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1711 else // Return Void 1712 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1713 1714 return result; 1715} 1716 1717 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1718 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1719 // one of the above mentioned nodes. It has to be wrapped because otherwise 1720 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1721 // be used to form addressing modes. These wrapped nodes will be selected 1722 // into MOVi. 1723 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1724 EVT PtrVT = Op.getValueType(); 1725 // FIXME there is no actual debug info here 1726 DebugLoc dl = Op.getDebugLoc(); 1727 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1728 SDValue Res; 1729 if (CP->isMachineConstantPoolEntry()) 1730 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1731 CP->getAlignment()); 1732 else 1733 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1734 CP->getAlignment()); 1735 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1736} 1737 1738 unsigned ARMTargetLowering::getJumpTableEncoding() const { 1739 return MachineJumpTableInfo::EK_Inline; 1740} 1741 1742 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1743 SelectionDAG &DAG) const { 1744 MachineFunction &MF = DAG.getMachineFunction(); 1745 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1746 unsigned ARMPCLabelIndex = 0; 1747 DebugLoc DL = Op.getDebugLoc(); 1748 EVT PtrVT = getPointerTy(); 1749 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1750 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1751 SDValue CPAddr; 1752 if (RelocM == Reloc::Static) { 1753 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1754 } else { 1755 unsigned PCAdj = Subtarget->isThumb() ?
4 : 8; 1756 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1757 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1758 ARMCP::CPBlockAddress, 1759 PCAdj); 1760 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1761 } 1762 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1763 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1764 MachinePointerInfo::getConstantPool(), 1765 false, false, 0); 1766 if (RelocM == Reloc::Static) 1767 return Result; 1768 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1769 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1770} 1771 1772// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1773SDValue 1774ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1775 SelectionDAG &DAG) const { 1776 DebugLoc dl = GA->getDebugLoc(); 1777 EVT PtrVT = getPointerTy(); 1778 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1779 MachineFunction &MF = DAG.getMachineFunction(); 1780 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1781 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1782 ARMConstantPoolValue *CPV = 1783 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1784 ARMCP::CPValue, PCAdj, "tlsgd", true); 1785 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1786 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1787 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1788 MachinePointerInfo::getConstantPool(), 1789 false, false, 0); 1790 SDValue Chain = Argument.getValue(1); 1791 1792 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1793 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1794 1795 // call __tls_get_addr. 1796 ArgListTy Args; 1797 ArgListEntry Entry; 1798 Entry.Node = Argument; 1799 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1800 Args.push_back(Entry); 1801 // FIXME: is there useful debug info available here? 1802 std::pair<SDValue, SDValue> CallResult = 1803 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1804 false, false, false, false, 1805 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1806 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1807 return CallResult.first; 1808} 1809 1810// Lower ISD::GlobalTLSAddress using the "initial exec" or 1811// "local exec" model. 1812SDValue 1813ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1814 SelectionDAG &DAG) const { 1815 const GlobalValue *GV = GA->getGlobal(); 1816 DebugLoc dl = GA->getDebugLoc(); 1817 SDValue Offset; 1818 SDValue Chain = DAG.getEntryNode(); 1819 EVT PtrVT = getPointerTy(); 1820 // Get the Thread Pointer 1821 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1822 1823 if (GV->isDeclaration()) { 1824 MachineFunction &MF = DAG.getMachineFunction(); 1825 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1826 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1827 // Initial exec model. 1828 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 1829 ARMConstantPoolValue *CPV = 1830 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1831 ARMCP::CPValue, PCAdj, "gottpoff", true); 1832 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1833 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1834 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1835 MachinePointerInfo::getConstantPool(), 1836 false, false, 0); 1837 Chain = Offset.getValue(1); 1838 1839 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1840 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1841 1842 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1843 MachinePointerInfo::getConstantPool(), 1844 false, false, 0); 1845 } else { 1846 // local exec model 1847 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff"); 1848 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1849 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1850 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1851 MachinePointerInfo::getConstantPool(), 1852 false, false, 0); 1853 } 1854 1855 // The address of the thread local variable is the add of the thread 1856 // pointer with the offset of the variable. 1857 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1858} 1859 1860SDValue 1861ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1862 // TODO: implement the "local dynamic" model 1863 assert(Subtarget->isTargetELF() && 1864 "TLS not implemented for non-ELF targets"); 1865 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1866 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1867 // otherwise use the "Local Exec" TLS Model 1868 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1869 return LowerToTLSGeneralDynamicModel(GA, DAG); 1870 else 1871 return LowerToTLSExecModels(GA, DAG); 1872} 1873 1874SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1875 SelectionDAG &DAG) const { 1876 EVT PtrVT = getPointerTy(); 1877 DebugLoc dl = Op.getDebugLoc(); 1878 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1879 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1880 if (RelocM == Reloc::PIC_) { 1881 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1882 ARMConstantPoolValue *CPV = 1883 new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT"); 1884 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1885 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1886 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1887 CPAddr, 1888 MachinePointerInfo::getConstantPool(), 1889 false, false, 0); 1890 SDValue Chain = Result.getValue(1); 1891 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1892 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1893 if (!UseGOTOFF) 1894 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1895 MachinePointerInfo::getGOT(), false, false, 0); 1896 return Result; 1897 } else { 1898 // If we have T2 ops, we can materialize the address directly via movt/movw 1899 // pair. This is always cheaper. 
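  // For example (illustrative):
  //   movw r0, :lower16:gvar
  //   movt r0, :upper16:gvar
  // materializes the address in two ALU instructions, versus a literal-pool
  // load that costs a load plus constant-pool space.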
1900 if (Subtarget->useMovt()) { 1901 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1902 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1903 } else { 1904 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1905 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1906 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1907 MachinePointerInfo::getConstantPool(), 1908 false, false, 0); 1909 } 1910 } 1911} 1912 1913SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1914 SelectionDAG &DAG) const { 1915 MachineFunction &MF = DAG.getMachineFunction(); 1916 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1917 unsigned ARMPCLabelIndex = 0; 1918 EVT PtrVT = getPointerTy(); 1919 DebugLoc dl = Op.getDebugLoc(); 1920 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1921 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1922 SDValue CPAddr; 1923 if (RelocM == Reloc::Static) 1924 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1925 else { 1926 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1927 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1928 ARMConstantPoolValue *CPV = 1929 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1930 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1931 } 1932 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1933 1934 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1935 MachinePointerInfo::getConstantPool(), 1936 false, false, 0); 1937 SDValue Chain = Result.getValue(1); 1938 1939 if (RelocM == Reloc::PIC_) { 1940 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1941 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1942 } 1943 1944 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1945 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 1946 false, false, 0); 1947 1948 return Result; 1949} 1950 1951SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 1952 SelectionDAG &DAG) const { 1953 assert(Subtarget->isTargetELF() && 1954 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 1955 MachineFunction &MF = DAG.getMachineFunction(); 1956 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1957 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1958 EVT PtrVT = getPointerTy(); 1959 DebugLoc dl = Op.getDebugLoc(); 1960 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1961 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1962 "_GLOBAL_OFFSET_TABLE_", 1963 ARMPCLabelIndex, PCAdj); 1964 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1965 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1966 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1967 MachinePointerInfo::getConstantPool(), 1968 false, false, 0); 1969 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1970 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1971} 1972 1973SDValue 1974ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 1975 const { 1976 DebugLoc dl = Op.getDebugLoc(); 1977 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 1978 Op.getOperand(0), Op.getOperand(1)); 1979} 1980 1981SDValue 1982ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 1983 DebugLoc dl = Op.getDebugLoc(); 1984 SDValue Val = DAG.getConstant(0, MVT::i32); 1985 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 1986 Op.getOperand(1), Val); 1987} 1988 1989SDValue 1990ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 1991 DebugLoc dl = Op.getDebugLoc(); 1992 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 1993 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 1994} 1995 1996SDValue 1997ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 1998 const ARMSubtarget *Subtarget) const { 1999 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2000 DebugLoc dl = Op.getDebugLoc(); 2001 switch (IntNo) { 2002 default: return SDValue(); // Don't custom lower most intrinsics. 2003 case Intrinsic::arm_thread_pointer: { 2004 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2005 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2006 } 2007 case Intrinsic::eh_sjlj_lsda: { 2008 MachineFunction &MF = DAG.getMachineFunction(); 2009 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2010 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 2011 EVT PtrVT = getPointerTy(); 2012 DebugLoc dl = Op.getDebugLoc(); 2013 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2014 SDValue CPAddr; 2015 unsigned PCAdj = (RelocM != Reloc::PIC_) 2016 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2017 ARMConstantPoolValue *CPV = 2018 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2019 ARMCP::CPLSDA, PCAdj); 2020 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2021 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2022 SDValue Result = 2023 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2024 MachinePointerInfo::getConstantPool(), 2025 false, false, 0); 2026 2027 if (RelocM == Reloc::PIC_) { 2028 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2029 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2030 } 2031 return Result; 2032 } 2033 } 2034} 2035 2036static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2037 const ARMSubtarget *Subtarget) { 2038 DebugLoc dl = Op.getDebugLoc(); 2039 if (!Subtarget->hasDataBarrier()) { 2040 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2041 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2042 // here. 2043 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb1Only() && 2044 "Unexpected ISD::MEMBARRIER encountered. 
Should be libcall!"); 2045 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2046 DAG.getConstant(0, MVT::i32)); 2047 } 2048 2049 SDValue Op5 = Op.getOperand(5); 2050 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2051 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2052 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2053 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2054 2055 ARM_MB::MemBOpt DMBOpt; 2056 if (isDeviceBarrier) 2057 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2058 else 2059 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2060 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2061 DAG.getConstant(DMBOpt, MVT::i32)); 2062} 2063 2064 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2065 const ARMSubtarget *Subtarget) { 2066 // ARM pre-v5TE and Thumb1 do not have preload instructions. 2067 if (!(Subtarget->isThumb2() || 2068 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2069 // Just preserve the chain. 2070 return Op.getOperand(0); 2071 2072 DebugLoc dl = Op.getDebugLoc(); 2073 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2074 if (!isRead && 2075 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2076 // ARMv7 with MP extension has PLDW. 2077 return Op.getOperand(0); 2078 2079 if (Subtarget->isThumb()) 2080 // Invert the bits. 2081 isRead = ~isRead & 1; 2082 unsigned isData = Subtarget->isThumb() ? 0 : 1; 2083 2084 // Currently there is no intrinsic that matches pli. 2085 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2086 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2087 DAG.getConstant(isData, MVT::i32)); 2088} 2089 2090 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2091 MachineFunction &MF = DAG.getMachineFunction(); 2092 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2093 2094 // vastart just stores the address of the VarArgsFrameIndex slot into the 2095 // memory location argument. 2096 DebugLoc dl = Op.getDebugLoc(); 2097 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2098 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2099 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2100 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2101 MachinePointerInfo(SV), false, false, 0); 2102} 2103 2104 SDValue 2105 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2106 SDValue &Root, SelectionDAG &DAG, 2107 DebugLoc dl) const { 2108 MachineFunction &MF = DAG.getMachineFunction(); 2109 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2110 2111 TargetRegisterClass *RC; 2112 if (AFI->isThumb1OnlyFunction()) 2113 RC = ARM::tGPRRegisterClass; 2114 else 2115 RC = ARM::GPRRegisterClass; 2116 2117 // Transform the arguments stored in physical registers into virtual ones. 2118 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2119 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2120 2121 SDValue ArgValue2; 2122 if (NextVA.isMemLoc()) { 2123 MachineFrameInfo *MFI = MF.getFrameInfo(); 2124 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2125 2126 // Create load node to retrieve arguments from the stack.
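  // Illustrative split handled here: an f64 formal assigned as
  // { r3, [sp, #0] } keeps one half in a GPR and reloads the other half
  // from the fixed stack object created above; VMOVDRR below then rebuilds
  // the double from the two i32 halves.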
2127 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2128 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2129 MachinePointerInfo::getFixedStack(FI), 2130 false, false, 0); 2131 } else { 2132 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2133 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2134 } 2135 2136 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2137} 2138 2139SDValue 2140ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2141 CallingConv::ID CallConv, bool isVarArg, 2142 const SmallVectorImpl<ISD::InputArg> 2143 &Ins, 2144 DebugLoc dl, SelectionDAG &DAG, 2145 SmallVectorImpl<SDValue> &InVals) 2146 const { 2147 2148 MachineFunction &MF = DAG.getMachineFunction(); 2149 MachineFrameInfo *MFI = MF.getFrameInfo(); 2150 2151 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2152 2153 // Assign locations to all of the incoming arguments. 2154 SmallVector<CCValAssign, 16> ArgLocs; 2155 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2156 *DAG.getContext()); 2157 CCInfo.AnalyzeFormalArguments(Ins, 2158 CCAssignFnForNode(CallConv, /* Return*/ false, 2159 isVarArg)); 2160 2161 SmallVector<SDValue, 16> ArgValues; 2162 2163 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2164 CCValAssign &VA = ArgLocs[i]; 2165 2166 // Arguments stored in registers. 2167 if (VA.isRegLoc()) { 2168 EVT RegVT = VA.getLocVT(); 2169 2170 SDValue ArgValue; 2171 if (VA.needsCustom()) { 2172 // f64 and vector types are split up into multiple registers or 2173 // combinations of registers and stack slots. 2174 if (VA.getLocVT() == MVT::v2f64) { 2175 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2176 Chain, DAG, dl); 2177 VA = ArgLocs[++i]; // skip ahead to next loc 2178 SDValue ArgValue2; 2179 if (VA.isMemLoc()) { 2180 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2181 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2182 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2183 MachinePointerInfo::getFixedStack(FI), 2184 false, false, 0); 2185 } else { 2186 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2187 Chain, DAG, dl); 2188 } 2189 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2190 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2191 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2192 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2193 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2194 } else 2195 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2196 2197 } else { 2198 TargetRegisterClass *RC; 2199 2200 if (RegVT == MVT::f32) 2201 RC = ARM::SPRRegisterClass; 2202 else if (RegVT == MVT::f64) 2203 RC = ARM::DPRRegisterClass; 2204 else if (RegVT == MVT::v2f64) 2205 RC = ARM::QPRRegisterClass; 2206 else if (RegVT == MVT::i32) 2207 RC = (AFI->isThumb1OnlyFunction() ? 2208 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2209 else 2210 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2211 2212 // Transform the arguments in physical registers into virtual ones. 2213 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2214 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2215 } 2216 2217 // If this is an 8 or 16-bit value, it is really passed promoted 2218 // to 32 bits. Insert an assert[sz]ext to capture this, then 2219 // truncate to the right size. 
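  // e.g. an i8 signext parameter arrives promoted to i32; the SExt case
  // below wraps it as (illustrative SelectionDAG shape):
  //   t1: i32 = AssertSext t0, ValueType:i8
  //   t2: i8  = truncate t1
  // recording that the top 24 bits are copies of the sign bit.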
2220 switch (VA.getLocInfo()) { 2221 default: llvm_unreachable("Unknown loc info!"); 2222 case CCValAssign::Full: break; 2223 case CCValAssign::BCvt: 2224 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); 2225 break; 2226 case CCValAssign::SExt: 2227 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2228 DAG.getValueType(VA.getValVT())); 2229 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2230 break; 2231 case CCValAssign::ZExt: 2232 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2233 DAG.getValueType(VA.getValVT())); 2234 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2235 break; 2236 } 2237 2238 InVals.push_back(ArgValue); 2239 2240 } else { // VA.isRegLoc() 2241 2242 // sanity check 2243 assert(VA.isMemLoc()); 2244 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2245 2246 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; 2247 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true); 2248 2249 // Create load nodes to retrieve arguments from the stack. 2250 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2251 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2252 MachinePointerInfo::getFixedStack(FI), 2253 false, false, 0)); 2254 } 2255 } 2256 2257 // varargs 2258 if (isVarArg) { 2259 static const unsigned GPRArgRegs[] = { 2260 ARM::R0, ARM::R1, ARM::R2, ARM::R3 2261 }; 2262 2263 unsigned NumGPRs = CCInfo.getFirstUnallocated 2264 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2265 2266 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); 2267 unsigned VARegSize = (4 - NumGPRs) * 4; 2268 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2269 unsigned ArgOffset = CCInfo.getNextStackOffset(); 2270 if (VARegSaveSize) { 2271 // If this function is vararg, store any remaining integer argument regs 2272 // to their spots on the stack so that they may be loaded by dereferencing 2273 // the result of va_next. 2274 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2275 AFI->setVarArgsFrameIndex( 2276 MFI->CreateFixedObject(VARegSaveSize, 2277 ArgOffset + VARegSaveSize - VARegSize, 2278 false)); 2279 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2280 getPointerTy()); 2281 2282 SmallVector<SDValue, 4> MemOps; 2283 for (; NumGPRs < 4; ++NumGPRs) { 2284 TargetRegisterClass *RC; 2285 if (AFI->isThumb1OnlyFunction()) 2286 RC = ARM::tGPRRegisterClass; 2287 else 2288 RC = ARM::GPRRegisterClass; 2289 2290 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC); 2291 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2292 SDValue Store = 2293 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2294 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2295 false, false, 0); 2296 MemOps.push_back(Store); 2297 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2298 DAG.getConstant(4, getPointerTy())); 2299 } 2300 if (!MemOps.empty()) 2301 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2302 &MemOps[0], MemOps.size()); 2303 } else 2304 // This will point to the next argument passed via stack. 2305 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2306 } 2307 2308 return Chain; 2309} 2310 2311 /// isFloatingPointZero - Return true if this is +0.0.
2312 static bool isFloatingPointZero(SDValue Op) { 2313 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2314 return CFP->getValueAPF().isPosZero(); 2315 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2316 // Maybe this has already been legalized into the constant pool? 2317 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2318 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2319 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2320 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2321 return CFP->getValueAPF().isPosZero(); 2322 } 2323 } 2324 return false; 2325} 2326 2327 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2328 /// the given operands. 2329 SDValue 2330 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2331 SDValue &ARMcc, SelectionDAG &DAG, 2332 DebugLoc dl) const { 2333 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2334 unsigned C = RHSC->getZExtValue(); 2335 if (!isLegalICmpImmediate(C)) { 2336 // Constant does not fit, try adjusting it by one? 2337 switch (CC) { 2338 default: break; 2339 case ISD::SETLT: 2340 case ISD::SETGE: 2341 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2342 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2343 RHS = DAG.getConstant(C-1, MVT::i32); 2344 } 2345 break; 2346 case ISD::SETULT: 2347 case ISD::SETUGE: 2348 if (C != 0 && isLegalICmpImmediate(C-1)) { 2349 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2350 RHS = DAG.getConstant(C-1, MVT::i32); 2351 } 2352 break; 2353 case ISD::SETLE: 2354 case ISD::SETGT: 2355 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2356 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2357 RHS = DAG.getConstant(C+1, MVT::i32); 2358 } 2359 break; 2360 case ISD::SETULE: 2361 case ISD::SETUGT: 2362 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2363 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2364 RHS = DAG.getConstant(C+1, MVT::i32); 2365 } 2366 break; 2367 } 2368 } 2369 } 2370 2371 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2372 ARMISD::NodeType CompareType; 2373 switch (CondCode) { 2374 default: 2375 CompareType = ARMISD::CMP; 2376 break; 2377 case ARMCC::EQ: 2378 case ARMCC::NE: 2379 // Uses only Z Flag 2380 CompareType = ARMISD::CMPZ; 2381 break; 2382 } 2383 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2384 return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS); 2385} 2386 2387 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
2388SDValue 2389ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2390 DebugLoc dl) const { 2391 SDValue Cmp; 2392 if (!isFloatingPointZero(RHS)) 2393 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS); 2394 else 2395 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS); 2396 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp); 2397} 2398 2399SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2400 SDValue Cond = Op.getOperand(0); 2401 SDValue SelectTrue = Op.getOperand(1); 2402 SDValue SelectFalse = Op.getOperand(2); 2403 DebugLoc dl = Op.getDebugLoc(); 2404 2405 // Convert: 2406 // 2407 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2408 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2409 // 2410 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2411 const ConstantSDNode *CMOVTrue = 2412 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2413 const ConstantSDNode *CMOVFalse = 2414 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2415 2416 if (CMOVTrue && CMOVFalse) { 2417 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2418 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2419 2420 SDValue True; 2421 SDValue False; 2422 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2423 True = SelectTrue; 2424 False = SelectFalse; 2425 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2426 True = SelectFalse; 2427 False = SelectTrue; 2428 } 2429 2430 if (True.getNode() && False.getNode()) { 2431 EVT VT = Cond.getValueType(); 2432 SDValue ARMcc = Cond.getOperand(2); 2433 SDValue CCR = Cond.getOperand(3); 2434 SDValue Cmp = Cond.getOperand(4); 2435 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2436 } 2437 } 2438 } 2439 2440 return DAG.getSelectCC(dl, Cond, 2441 DAG.getConstant(0, Cond.getValueType()), 2442 SelectTrue, SelectFalse, ISD::SETNE); 2443} 2444 2445SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2446 EVT VT = Op.getValueType(); 2447 SDValue LHS = Op.getOperand(0); 2448 SDValue RHS = Op.getOperand(1); 2449 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2450 SDValue TrueVal = Op.getOperand(2); 2451 SDValue FalseVal = Op.getOperand(3); 2452 DebugLoc dl = Op.getDebugLoc(); 2453 2454 if (LHS.getValueType() == MVT::i32) { 2455 SDValue ARMcc; 2456 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2457 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2458 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2459 } 2460 2461 ARMCC::CondCodes CondCode, CondCode2; 2462 FPCCToARMCC(CC, CondCode, CondCode2); 2463 2464 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2465 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2466 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2467 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2468 ARMcc, CCR, Cmp); 2469 if (CondCode2 != ARMCC::AL) { 2470 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2471 // FIXME: Needs another CMP because flag can have but one use. 2472 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2473 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2474 Result, TrueVal, ARMcc2, CCR, Cmp2); 2475 } 2476 return Result; 2477} 2478 2479/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2480/// to morph to an integer compare sequence. 
2481 static bool canChangeToInt(SDValue Op, bool &SeenZero, 2482 const ARMSubtarget *Subtarget) { 2483 SDNode *N = Op.getNode(); 2484 if (!N->hasOneUse()) 2485 // Otherwise it requires moving the value from fp to integer registers. 2486 return false; 2487 if (!N->getNumValues()) 2488 return false; 2489 EVT VT = Op.getValueType(); 2490 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2491 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2492 // vmrs are very slow, e.g. cortex-a8. 2493 return false; 2494 2495 if (isFloatingPointZero(Op)) { 2496 SeenZero = true; 2497 return true; 2498 } 2499 return ISD::isNormalLoad(N); 2500} 2501 2502 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2503 if (isFloatingPointZero(Op)) 2504 return DAG.getConstant(0, MVT::i32); 2505 2506 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2507 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2508 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2509 Ld->isVolatile(), Ld->isNonTemporal(), 2510 Ld->getAlignment()); 2511 2512 llvm_unreachable("Unknown VFP cmp argument!"); 2513} 2514 2515 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2516 SDValue &RetVal1, SDValue &RetVal2) { 2517 if (isFloatingPointZero(Op)) { 2518 RetVal1 = DAG.getConstant(0, MVT::i32); 2519 RetVal2 = DAG.getConstant(0, MVT::i32); 2520 return; 2521 } 2522 2523 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2524 SDValue Ptr = Ld->getBasePtr(); 2525 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2526 Ld->getChain(), Ptr, 2527 Ld->getPointerInfo(), 2528 Ld->isVolatile(), Ld->isNonTemporal(), 2529 Ld->getAlignment()); 2530 2531 EVT PtrType = Ptr.getValueType(); 2532 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2533 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2534 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2535 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2536 Ld->getChain(), NewPtr, 2537 Ld->getPointerInfo().getWithOffset(4), 2538 Ld->isVolatile(), Ld->isNonTemporal(), 2539 NewAlign); 2540 return; 2541 } 2542 2543 llvm_unreachable("Unknown VFP cmp argument!"); 2544} 2545 2546 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2547 /// f32 and even f64 comparisons to integer ones. 2548 SDValue 2549 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2550 SDValue Chain = Op.getOperand(0); 2551 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2552 SDValue LHS = Op.getOperand(2); 2553 SDValue RHS = Op.getOperand(3); 2554 SDValue Dest = Op.getOperand(4); 2555 DebugLoc dl = Op.getDebugLoc(); 2556 2557 bool SeenZero = false; 2558 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2559 canChangeToInt(RHS, SeenZero, Subtarget) && 2560 // If one of the operands is zero, it's safe to ignore the NaN case since 2561 // we only care about equality comparisons. 2562 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2563 // If unsafe fp math optimization is enabled and there are no other uses of 2564 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2565 // to an integer comparison.
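  // Illustrative effect: a branch on 'x == 0.0f' can be fed by an integer
  // CMP of x's bit pattern against zero instead of a vcmpe plus vmrs pair,
  // accepting under unsafe-fp-math that -0.0 no longer compares equal
  // to +0.0.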
2566 if (CC == ISD::SETOEQ) 2567 CC = ISD::SETEQ; 2568 else if (CC == ISD::SETUNE) 2569 CC = ISD::SETNE; 2570 2571 SDValue ARMcc; 2572 if (LHS.getValueType() == MVT::f32) { 2573 LHS = bitcastf32Toi32(LHS, DAG); 2574 RHS = bitcastf32Toi32(RHS, DAG); 2575 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2576 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2577 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2578 Chain, Dest, ARMcc, CCR, Cmp); 2579 } 2580 2581 SDValue LHS1, LHS2; 2582 SDValue RHS1, RHS2; 2583 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2584 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2585 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2586 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2587 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2588 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2589 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2590 } 2591 2592 return SDValue(); 2593} 2594 2595SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2596 SDValue Chain = Op.getOperand(0); 2597 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2598 SDValue LHS = Op.getOperand(2); 2599 SDValue RHS = Op.getOperand(3); 2600 SDValue Dest = Op.getOperand(4); 2601 DebugLoc dl = Op.getDebugLoc(); 2602 2603 if (LHS.getValueType() == MVT::i32) { 2604 SDValue ARMcc; 2605 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2606 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2607 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2608 Chain, Dest, ARMcc, CCR, Cmp); 2609 } 2610 2611 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2612 2613 if (UnsafeFPMath && 2614 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2615 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2616 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2617 if (Result.getNode()) 2618 return Result; 2619 } 2620 2621 ARMCC::CondCodes CondCode, CondCode2; 2622 FPCCToARMCC(CC, CondCode, CondCode2); 2623 2624 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2625 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2626 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2627 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2628 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2629 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2630 if (CondCode2 != ARMCC::AL) { 2631 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2632 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2633 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2634 } 2635 return Res; 2636} 2637 2638SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2639 SDValue Chain = Op.getOperand(0); 2640 SDValue Table = Op.getOperand(1); 2641 SDValue Index = Op.getOperand(2); 2642 DebugLoc dl = Op.getDebugLoc(); 2643 2644 EVT PTy = getPointerTy(); 2645 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2646 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2647 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2648 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2649 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2650 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2651 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2652 if (Subtarget->isThumb2()) { 2653 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2654 // which does another jump to the destination. 
This also makes it easier 2655 // to translate it to TBB / TBH later. 2656 // FIXME: This might not work if the function is extremely large. 2657 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2658 Addr, Op.getOperand(2), JTI, UId); 2659 } 2660 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2661 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2662 MachinePointerInfo::getJumpTable(), 2663 false, false, 0); 2664 Chain = Addr.getValue(1); 2665 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2666 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2667 } else { 2668 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2669 MachinePointerInfo::getJumpTable(), false, false, 0); 2670 Chain = Addr.getValue(1); 2671 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2672 } 2673} 2674 2675static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2676 DebugLoc dl = Op.getDebugLoc(); 2677 unsigned Opc; 2678 2679 switch (Op.getOpcode()) { 2680 default: 2681 assert(0 && "Invalid opcode!"); 2682 case ISD::FP_TO_SINT: 2683 Opc = ARMISD::FTOSI; 2684 break; 2685 case ISD::FP_TO_UINT: 2686 Opc = ARMISD::FTOUI; 2687 break; 2688 } 2689 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2690 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); 2691} 2692 2693static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2694 EVT VT = Op.getValueType(); 2695 DebugLoc dl = Op.getDebugLoc(); 2696 unsigned Opc; 2697 2698 switch (Op.getOpcode()) { 2699 default: 2700 assert(0 && "Invalid opcode!"); 2701 case ISD::SINT_TO_FP: 2702 Opc = ARMISD::SITOF; 2703 break; 2704 case ISD::UINT_TO_FP: 2705 Opc = ARMISD::UITOF; 2706 break; 2707 } 2708 2709 Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0)); 2710 return DAG.getNode(Opc, dl, VT, Op); 2711} 2712 2713SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2714 // Implement fcopysign with a fabs and a conditional fneg. 2715 SDValue Tmp0 = Op.getOperand(0); 2716 SDValue Tmp1 = Op.getOperand(1); 2717 DebugLoc dl = Op.getDebugLoc(); 2718 EVT VT = Op.getValueType(); 2719 EVT SrcVT = Tmp1.getValueType(); 2720 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2721 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2722 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2723 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2724 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2725 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2726} 2727 2728SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2729 MachineFunction &MF = DAG.getMachineFunction(); 2730 MachineFrameInfo *MFI = MF.getFrameInfo(); 2731 MFI->setReturnAddressIsTaken(true); 2732 2733 EVT VT = Op.getValueType(); 2734 DebugLoc dl = Op.getDebugLoc(); 2735 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2736 if (Depth) { 2737 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2738 SDValue Offset = DAG.getConstant(4, MVT::i32); 2739 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2740 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2741 MachinePointerInfo(), false, false, 0); 2742 } 2743 2744 // Return LR, which contains the return address. Mark it an implicit live-in. 
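  // (The Depth > 0 case above relies on the return address being saved at
  // the frame pointer plus 4, i.e. an {fp, lr} save order; an illustrative
  // layout assumption, not verified here.)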
2745 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2746 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2747} 2748 2749SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2750 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2751 MFI->setFrameAddressIsTaken(true); 2752 2753 EVT VT = Op.getValueType(); 2754 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2755 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2756 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2757 ? ARM::R7 : ARM::R11; 2758 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2759 while (Depth--) 2760 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2761 MachinePointerInfo(), 2762 false, false, 0); 2763 return FrameAddr; 2764} 2765 2766/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to 2767/// expand a bit convert where either the source or destination type is i64 to 2768/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2769/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2770/// vectors), since the legalizer won't know what to do with that. 2771static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) { 2772 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2773 DebugLoc dl = N->getDebugLoc(); 2774 SDValue Op = N->getOperand(0); 2775 2776 // This function is only supposed to be called for i64 types, either as the 2777 // source or destination of the bit convert. 2778 EVT SrcVT = Op.getValueType(); 2779 EVT DstVT = N->getValueType(0); 2780 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2781 "ExpandBIT_CONVERT called for non-i64 type"); 2782 2783 // Turn i64->f64 into VMOVDRR. 2784 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2785 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2786 DAG.getConstant(0, MVT::i32)); 2787 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2788 DAG.getConstant(1, MVT::i32)); 2789 return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT, 2790 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2791 } 2792 2793 // Turn f64->i64 into VMOVRRD. 2794 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2795 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2796 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2797 // Merge the pieces into a single i64 value. 2798 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2799 } 2800 2801 return SDValue(); 2802} 2803 2804/// getZeroVector - Returns a vector of specified type with all zero elements. 2805/// Zero vectors are used to represent vector negation and in those cases 2806/// will be implemented with the NEON VNEG instruction. However, VNEG does 2807/// not support i64 elements, so sometimes the zero vectors will need to be 2808/// explicitly constructed. Regardless, use a canonical VMOV to create the 2809/// zero vector. 2810static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2811 assert(VT.isVector() && "Expected a vector type"); 2812 // The canonical modified immediate encoding of a zero vector is....0! 2813 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2814 EVT VmovVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 2815 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2816 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 2817} 2818 2819/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two 2820/// i32 values and take a 2 x i32 value to shift plus a shift amount. 2821SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2822 SelectionDAG &DAG) const { 2823 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2824 EVT VT = Op.getValueType(); 2825 unsigned VTBits = VT.getSizeInBits(); 2826 DebugLoc dl = Op.getDebugLoc(); 2827 SDValue ShOpLo = Op.getOperand(0); 2828 SDValue ShOpHi = Op.getOperand(1); 2829 SDValue ShAmt = Op.getOperand(2); 2830 SDValue ARMcc; 2831 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2832 2833 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2834 2835 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2836 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2837 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2838 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2839 DAG.getConstant(VTBits, MVT::i32)); 2840 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2841 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2842 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2843 2844 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2845 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2846 ARMcc, DAG, dl); 2847 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2848 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2849 CCR, Cmp); 2850 2851 SDValue Ops[2] = { Lo, Hi }; 2852 return DAG.getMergeValues(Ops, 2, dl); 2853} 2854 2855/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2856/// i32 values and takes a 2 x i32 value to shift plus a shift amount. 2857SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2858 SelectionDAG &DAG) const { 2859 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2860 EVT VT = Op.getValueType(); 2861 unsigned VTBits = VT.getSizeInBits(); 2862 DebugLoc dl = Op.getDebugLoc(); 2863 SDValue ShOpLo = Op.getOperand(0); 2864 SDValue ShOpHi = Op.getOperand(1); 2865 SDValue ShAmt = Op.getOperand(2); 2866 SDValue ARMcc; 2867 2868 assert(Op.getOpcode() == ISD::SHL_PARTS); 2869 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2870 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2871 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2872 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2873 DAG.getConstant(VTBits, MVT::i32)); 2874 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2875 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2876 2877 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2878 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2879 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2880 ARMcc, DAG, dl); 2881 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2882 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2883 CCR, Cmp); 2884 2885 SDValue Ops[2] = { Lo, Hi }; 2886 return DAG.getMergeValues(Ops, 2, dl); 2887} 2888 2889SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2890 SelectionDAG &DAG) const { 2891 // The rounding mode is in bits 23:22 of the FPSCR.
2892 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 2893 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3, 2894 // so that the shift and the AND get folded into a bitfield extract. 2895 DebugLoc dl = Op.getDebugLoc(); 2896 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 2897 DAG.getConstant(Intrinsic::arm_get_fpscr, 2898 MVT::i32)); 2899 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 2900 DAG.getConstant(1U << 22, MVT::i32)); 2901 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 2902 DAG.getConstant(22, MVT::i32)); 2903 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 2904 DAG.getConstant(3, MVT::i32)); 2905} 2906 2907static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 2908 const ARMSubtarget *ST) { 2909 EVT VT = N->getValueType(0); 2910 DebugLoc dl = N->getDebugLoc(); 2911 2912 if (!ST->hasV6T2Ops()) 2913 return SDValue(); 2914 2915 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 2916 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 2917} 2918 2919static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 2920 const ARMSubtarget *ST) { 2921 EVT VT = N->getValueType(0); 2922 DebugLoc dl = N->getDebugLoc(); 2923 2924 // Lower vector shifts on NEON to use VSHL. 2925 if (VT.isVector()) { 2926 assert(ST->hasNEON() && "unexpected vector shift"); 2927 2928 // Left shifts translate directly to the vshiftu intrinsic. 2929 if (N->getOpcode() == ISD::SHL) 2930 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 2931 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 2932 N->getOperand(0), N->getOperand(1)); 2933 2934 assert((N->getOpcode() == ISD::SRA || 2935 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 2936 2937 // NEON uses the same intrinsics for both left and right shifts. For 2938 // right shifts, the shift amounts are negative, so negate the vector of 2939 // shift amounts. 2940 EVT ShiftVT = N->getOperand(1).getValueType(); 2941 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 2942 getZeroVector(ShiftVT, DAG, dl), 2943 N->getOperand(1)); 2944 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 2945 Intrinsic::arm_neon_vshifts : 2946 Intrinsic::arm_neon_vshiftu); 2947 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 2948 DAG.getConstant(vshiftInt, MVT::i32), 2949 N->getOperand(0), NegatedCount); 2950 } 2951 2952 // We can get here for a node like i32 = ISD::SHL i32, i64 2953 if (VT != MVT::i64) 2954 return SDValue(); 2955 2956 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 2957 "Unknown shift to lower!"); 2958 2959 // We only lower SRA, SRL of 1 here, all others use generic lowering. 2960 if (!isa<ConstantSDNode>(N->getOperand(1)) || 2961 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 2962 return SDValue(); 2963 2964 // If we are in thumb mode, we don't have RRX. 2965 if (ST->isThumb1Only()) return SDValue(); 2966 2967 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 2968 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 2969 DAG.getConstant(0, MVT::i32)); 2970 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 2971 DAG.getConstant(1, MVT::i32)); 2972 2973 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 2974 // captures the result into a carry flag. 2975 unsigned Opc = N->getOpcode() == ISD::SRL ?
ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 2976 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1); 2977 2978 // The low part is an ARMISD::RRX operand, which shifts the carry in. 2979 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 2980 2981 // Merge the pieces into a single i64 value. 2982 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 2983} 2984 2985static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 2986 SDValue TmpOp0, TmpOp1; 2987 bool Invert = false; 2988 bool Swap = false; 2989 unsigned Opc = 0; 2990 2991 SDValue Op0 = Op.getOperand(0); 2992 SDValue Op1 = Op.getOperand(1); 2993 SDValue CC = Op.getOperand(2); 2994 EVT VT = Op.getValueType(); 2995 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 2996 DebugLoc dl = Op.getDebugLoc(); 2997 2998 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 2999 switch (SetCCOpcode) { 3000 default: llvm_unreachable("Illegal FP comparison"); break; 3001 case ISD::SETUNE: 3002 case ISD::SETNE: Invert = true; // Fallthrough 3003 case ISD::SETOEQ: 3004 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3005 case ISD::SETOLT: 3006 case ISD::SETLT: Swap = true; // Fallthrough 3007 case ISD::SETOGT: 3008 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3009 case ISD::SETOLE: 3010 case ISD::SETLE: Swap = true; // Fallthrough 3011 case ISD::SETOGE: 3012 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3013 case ISD::SETUGE: Swap = true; // Fallthrough 3014 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3015 case ISD::SETUGT: Swap = true; // Fallthrough 3016 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3017 case ISD::SETUEQ: Invert = true; // Fallthrough 3018 case ISD::SETONE: 3019 // Expand this to (OLT | OGT). 3020 TmpOp0 = Op0; 3021 TmpOp1 = Op1; 3022 Opc = ISD::OR; 3023 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3024 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3025 break; 3026 case ISD::SETUO: Invert = true; // Fallthrough 3027 case ISD::SETO: 3028 // Expand this to (OLT | OGE). 3029 TmpOp0 = Op0; 3030 TmpOp1 = Op1; 3031 Opc = ISD::OR; 3032 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3033 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3034 break; 3035 } 3036 } else { 3037 // Integer comparisons. 3038 switch (SetCCOpcode) { 3039 default: llvm_unreachable("Illegal integer comparison"); break; 3040 case ISD::SETNE: Invert = true; 3041 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3042 case ISD::SETLT: Swap = true; 3043 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3044 case ISD::SETLE: Swap = true; 3045 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3046 case ISD::SETULT: Swap = true; 3047 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3048 case ISD::SETULE: Swap = true; 3049 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3050 } 3051 3052 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3053 if (Opc == ARMISD::VCEQ) { 3054 3055 SDValue AndOp; 3056 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3057 AndOp = Op0; 3058 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3059 AndOp = Op1; 3060 3061 // Ignore bitconvert. 
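// (The AND being tested may sit behind a BIT_CONVERT introduced during
// legalization, so peel one level off before checking for ISD::AND below.)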
3062 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT) 3063 AndOp = AndOp.getOperand(0); 3064 3065 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3066 Opc = ARMISD::VTST; 3067 Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0)); 3068 Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1)); 3069 Invert = !Invert; 3070 } 3071 } 3072 } 3073 3074 if (Swap) 3075 std::swap(Op0, Op1); 3076 3077 // If one of the operands is a constant vector zero, attempt to fold the 3078 // comparison to a specialized compare-against-zero form. 3079 SDValue SingleOp; 3080 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3081 SingleOp = Op0; 3082 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3083 if (Opc == ARMISD::VCGE) 3084 Opc = ARMISD::VCLEZ; 3085 else if (Opc == ARMISD::VCGT) 3086 Opc = ARMISD::VCLTZ; 3087 SingleOp = Op1; 3088 } 3089 3090 SDValue Result; 3091 if (SingleOp.getNode()) { 3092 switch (Opc) { 3093 case ARMISD::VCEQ: 3094 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3095 case ARMISD::VCGE: 3096 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3097 case ARMISD::VCLEZ: 3098 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3099 case ARMISD::VCGT: 3100 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3101 case ARMISD::VCLTZ: 3102 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3103 default: 3104 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3105 } 3106 } else { 3107 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3108 } 3109 3110 if (Invert) 3111 Result = DAG.getNOT(dl, Result, VT); 3112 3113 return Result; 3114} 3115 3116/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3117/// valid vector constant for a NEON instruction with a "modified immediate" 3118/// operand (e.g., VMOV). If so, return the encoded value. 3119static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3120 unsigned SplatBitSize, SelectionDAG &DAG, 3121 EVT &VT, bool is128Bits, NEONModImmType type) { 3122 unsigned OpCmode, Imm; 3123 3124 // SplatBitSize is set to the smallest size that splats the vector, so a 3125 // zero vector will always have SplatBitSize == 8. However, NEON modified 3126 // immediate instructions other than VMOV do not support the 8-bit encoding 3127 // of a zero vector, and the default encoding of zero is supposed to be the 3128 // 32-bit version. 3129 if (SplatBits == 0) 3130 SplatBitSize = 32; 3131 3132 switch (SplatBitSize) { 3133 case 8: 3134 if (type != VMOVModImm) 3135 return SDValue(); 3136 // Any 1-byte value is OK. Op=0, Cmode=1110. 3137 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3138 OpCmode = 0xe; 3139 Imm = SplatBits; 3140 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3141 break; 3142 3143 case 16: 3144 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3145 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3146 if ((SplatBits & ~0xff) == 0) { 3147 // Value = 0x00nn: Op=x, Cmode=100x. 3148 OpCmode = 0x8; 3149 Imm = SplatBits; 3150 break; 3151 } 3152 if ((SplatBits & ~0xff00) == 0) { 3153 // Value = 0xnn00: Op=x, Cmode=101x. 3154 OpCmode = 0xa; 3155 Imm = SplatBits >> 8; 3156 break; 3157 } 3158 return SDValue(); 3159 3160 case 32: 3161 // NEON's 32-bit VMOV supports splat values where: 3162 // * only one byte is nonzero, or 3163 // * the least significant byte is 0xff and the second byte is nonzero, or 3164 // * the least significant 2 bytes are 0xff and the third is nonzero. 3165 VT = is128Bits ?
MVT::v4i32 : MVT::v2i32; 3166 if ((SplatBits & ~0xff) == 0) { 3167 // Value = 0x000000nn: Op=x, Cmode=000x. 3168 OpCmode = 0; 3169 Imm = SplatBits; 3170 break; 3171 } 3172 if ((SplatBits & ~0xff00) == 0) { 3173 // Value = 0x0000nn00: Op=x, Cmode=001x. 3174 OpCmode = 0x2; 3175 Imm = SplatBits >> 8; 3176 break; 3177 } 3178 if ((SplatBits & ~0xff0000) == 0) { 3179 // Value = 0x00nn0000: Op=x, Cmode=010x. 3180 OpCmode = 0x4; 3181 Imm = SplatBits >> 16; 3182 break; 3183 } 3184 if ((SplatBits & ~0xff000000) == 0) { 3185 // Value = 0xnn000000: Op=x, Cmode=011x. 3186 OpCmode = 0x6; 3187 Imm = SplatBits >> 24; 3188 break; 3189 } 3190 3191 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3192 if (type == OtherModImm) return SDValue(); 3193 3194 if ((SplatBits & ~0xffff) == 0 && 3195 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3196 // Value = 0x0000nnff: Op=x, Cmode=1100. 3197 OpCmode = 0xc; 3198 Imm = SplatBits >> 8; 3199 SplatBits |= 0xff; 3200 break; 3201 } 3202 3203 if ((SplatBits & ~0xffffff) == 0 && 3204 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3205 // Value = 0x00nnffff: Op=x, Cmode=1101. 3206 OpCmode = 0xd; 3207 Imm = SplatBits >> 16; 3208 SplatBits |= 0xffff; 3209 break; 3210 } 3211 3212 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3213 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3214 // VMOV.I32. A (very) minor optimization would be to replicate the value 3215 // and fall through here to test for a valid 64-bit splat. But, then the 3216 // caller would also need to check and handle the change in size. 3217 return SDValue(); 3218 3219 case 64: { 3220 if (type != VMOVModImm) 3221 return SDValue(); 3222 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3223 uint64_t BitMask = 0xff; 3224 uint64_t Val = 0; 3225 unsigned ImmMask = 1; 3226 Imm = 0; 3227 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3228 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3229 Val |= BitMask; 3230 Imm |= ImmMask; 3231 } else if ((SplatBits & BitMask) != 0) { 3232 return SDValue(); 3233 } 3234 BitMask <<= 8; 3235 ImmMask <<= 1; 3236 } 3237 // Op=1, Cmode=1110. 3238 OpCmode = 0x1e; 3239 SplatBits = Val; 3240 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3241 break; 3242 } 3243 3244 default: 3245 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3246 return SDValue(); 3247 } 3248 3249 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3250 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3251} 3252 3253static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3254 bool &ReverseVEXT, unsigned &Imm) { 3255 unsigned NumElts = VT.getVectorNumElements(); 3256 ReverseVEXT = false; 3257 3258 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3259 if (M[0] < 0) 3260 return false; 3261 3262 Imm = M[0]; 3263 3264 // If this is a VEXT shuffle, the immediate value is the index of the first 3265 // element. The other shuffle indices must be the successive elements after 3266 // the first one. 3267 unsigned ExpectedElt = Imm; 3268 for (unsigned i = 1; i < NumElts; ++i) { 3269 // Increment the expected index. If it wraps around, it may still be 3270 // a VEXT but the source vectors must be swapped. 
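// Worked example (illustrative): for a v4i16 mask <6, 7, 0, 1>, Imm starts
// at 6; ExpectedElt reaches 8 (== NumElts * 2) at i == 2 and wraps to 0,
// setting ReverseVEXT. After the loop, Imm is adjusted to 6 - 4 == 2,
// i.e. a VEXT of (V2, V1) with immediate 2.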
3271 ExpectedElt += 1; 3272 if (ExpectedElt == NumElts * 2) { 3273 ExpectedElt = 0; 3274 ReverseVEXT = true; 3275 } 3276 3277 if (M[i] < 0) continue; // ignore UNDEF indices 3278 if (ExpectedElt != static_cast<unsigned>(M[i])) 3279 return false; 3280 } 3281 3282 // Adjust the index value if the source operands will be swapped. 3283 if (ReverseVEXT) 3284 Imm -= NumElts; 3285 3286 return true; 3287} 3288 3289/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3290/// instruction with the specified blocksize. (The order of the elements 3291/// within each block of the vector is reversed.) 3292static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3293 unsigned BlockSize) { 3294 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3295 "Only possible block sizes for VREV are: 16, 32, 64"); 3296 3297 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3298 if (EltSz == 64) 3299 return false; 3300 3301 unsigned NumElts = VT.getVectorNumElements(); 3302 unsigned BlockElts = M[0] + 1; 3303 // If the first shuffle index is UNDEF, be optimistic. 3304 if (M[0] < 0) 3305 BlockElts = BlockSize / EltSz; 3306 3307 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3308 return false; 3309 3310 for (unsigned i = 0; i < NumElts; ++i) { 3311 if (M[i] < 0) continue; // ignore UNDEF indices 3312 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3313 return false; 3314 } 3315 3316 return true; 3317} 3318 3319static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3320 unsigned &WhichResult) { 3321 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3322 if (EltSz == 64) 3323 return false; 3324 3325 unsigned NumElts = VT.getVectorNumElements(); 3326 WhichResult = (M[0] == 0 ? 0 : 1); 3327 for (unsigned i = 0; i < NumElts; i += 2) { 3328 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3329 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3330 return false; 3331 } 3332 return true; 3333} 3334 3335/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3336/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3337/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3338static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3339 unsigned &WhichResult) { 3340 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3341 if (EltSz == 64) 3342 return false; 3343 3344 unsigned NumElts = VT.getVectorNumElements(); 3345 WhichResult = (M[0] == 0 ? 0 : 1); 3346 for (unsigned i = 0; i < NumElts; i += 2) { 3347 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3348 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3349 return false; 3350 } 3351 return true; 3352} 3353 3354static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3355 unsigned &WhichResult) { 3356 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3357 if (EltSz == 64) 3358 return false; 3359 3360 unsigned NumElts = VT.getVectorNumElements(); 3361 WhichResult = (M[0] == 0 ? 0 : 1); 3362 for (unsigned i = 0; i != NumElts; ++i) { 3363 if (M[i] < 0) continue; // ignore UNDEF indices 3364 if ((unsigned) M[i] != 2 * i + WhichResult) 3365 return false; 3366 } 3367 3368 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3369 if (VT.is64BitVector() && EltSz == 32) 3370 return false; 3371 3372 return true; 3373} 3374 3375/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3376/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 
3377/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. 3378static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3379 unsigned &WhichResult) { 3380 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3381 if (EltSz == 64) 3382 return false; 3383 3384 unsigned Half = VT.getVectorNumElements() / 2; 3385 WhichResult = (M[0] == 0 ? 0 : 1); 3386 for (unsigned j = 0; j != 2; ++j) { 3387 unsigned Idx = WhichResult; 3388 for (unsigned i = 0; i != Half; ++i) { 3389 int MIdx = M[i + j * Half]; 3390 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3391 return false; 3392 Idx += 2; 3393 } 3394 } 3395 3396 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3397 if (VT.is64BitVector() && EltSz == 32) 3398 return false; 3399 3400 return true; 3401} 3402 3403static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3404 unsigned &WhichResult) { 3405 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3406 if (EltSz == 64) 3407 return false; 3408 3409 unsigned NumElts = VT.getVectorNumElements(); 3410 WhichResult = (M[0] == 0 ? 0 : 1); 3411 unsigned Idx = WhichResult * NumElts / 2; 3412 for (unsigned i = 0; i != NumElts; i += 2) { 3413 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3414 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3415 return false; 3416 Idx += 1; 3417 } 3418 3419 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3420 if (VT.is64BitVector() && EltSz == 32) 3421 return false; 3422 3423 return true; 3424} 3425 3426/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3427/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3428/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3429static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3430 unsigned &WhichResult) { 3431 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3432 if (EltSz == 64) 3433 return false; 3434 3435 unsigned NumElts = VT.getVectorNumElements(); 3436 WhichResult = (M[0] == 0 ? 0 : 1); 3437 unsigned Idx = WhichResult * NumElts / 2; 3438 for (unsigned i = 0; i != NumElts; i += 2) { 3439 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3440 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3441 return false; 3442 Idx += 1; 3443 } 3444 3445 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3446 if (VT.is64BitVector() && EltSz == 32) 3447 return false; 3448 3449 return true; 3450} 3451 3452// If N is an integer constant that can be moved into a register in one 3453// instruction, return an SDValue of such a constant (will become a MOV 3454// instruction). Otherwise return null. 3455static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3456 const ARMSubtarget *ST, DebugLoc dl) { 3457 uint64_t Val; 3458 if (!isa<ConstantSDNode>(N)) 3459 return SDValue(); 3460 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3461 3462 if (ST->isThumb1Only()) { 3463 if (Val <= 255 || ~Val <= 255) 3464 return DAG.getConstant(Val, MVT::i32); 3465 } else { 3466 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3467 return DAG.getConstant(Val, MVT::i32); 3468 } 3469 return SDValue(); 3470} 3471 3472// If this is a case we can't handle, return null and let the default 3473// expansion code take care of it.
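// For illustration (not an exhaustive list): a constant splat such as
// <1, 1, 1, 1> : v4i32 is matched below as an immediate VMOV, its bitwise
// complement as an immediate VMVN, a non-constant splat becomes a VDUP, and
// all-constant vectors that match none of these fall through to the default
// expansion (a constant-pool load).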
3474static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3475 const ARMSubtarget *ST) { 3476 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3477 DebugLoc dl = Op.getDebugLoc(); 3478 EVT VT = Op.getValueType(); 3479 3480 APInt SplatBits, SplatUndef; 3481 unsigned SplatBitSize; 3482 bool HasAnyUndefs; 3483 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3484 if (SplatBitSize <= 64) { 3485 // Check if an immediate VMOV works. 3486 EVT VmovVT; 3487 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3488 SplatUndef.getZExtValue(), SplatBitSize, 3489 DAG, VmovVT, VT.is128BitVector(), 3490 VMOVModImm); 3491 if (Val.getNode()) { 3492 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3493 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3494 } 3495 3496 // Try an immediate VMVN. 3497 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3498 ((1LL << SplatBitSize) - 1)); 3499 Val = isNEONModifiedImm(NegatedImm, 3500 SplatUndef.getZExtValue(), SplatBitSize, 3501 DAG, VmovVT, VT.is128BitVector(), 3502 VMVNModImm); 3503 if (Val.getNode()) { 3504 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3505 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3506 } 3507 } 3508 } 3509 3510 // Scan through the operands to see if only one value is used. 3511 unsigned NumElts = VT.getVectorNumElements(); 3512 bool isOnlyLowElement = true; 3513 bool usesOnlyOneValue = true; 3514 bool isConstant = true; 3515 SDValue Value; 3516 for (unsigned i = 0; i < NumElts; ++i) { 3517 SDValue V = Op.getOperand(i); 3518 if (V.getOpcode() == ISD::UNDEF) 3519 continue; 3520 if (i > 0) 3521 isOnlyLowElement = false; 3522 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3523 isConstant = false; 3524 3525 if (!Value.getNode()) 3526 Value = V; 3527 else if (V != Value) 3528 usesOnlyOneValue = false; 3529 } 3530 3531 if (!Value.getNode()) 3532 return DAG.getUNDEF(VT); 3533 3534 if (isOnlyLowElement) 3535 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3536 3537 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3538 3539 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3540 // i32 and try again. 3541 if (usesOnlyOneValue && EltSize <= 32) { 3542 if (!isConstant) 3543 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3544 if (VT.getVectorElementType().isFloatingPoint()) { 3545 SmallVector<SDValue, 8> Ops; 3546 for (unsigned i = 0; i < NumElts; ++i) 3547 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, 3548 Op.getOperand(i))); 3549 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, 3550 EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts), &Ops[0], NumElts); 3551 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3552 if (Val.getNode()) 3553 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3554 } 3555 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3556 if (Val.getNode()) 3557 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3558 } 3559 3560 // If all elements are constants and the case above didn't get hit, fall back 3561 // to the default expansion, which will generate a load from the constant 3562 // pool. 3563 if (isConstant) 3564 return SDValue(); 3565 3566 // Vectors with 32- or 64-bit elements can be built by directly assigning 3567 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3568 // will be legalized. 3569 if (EltSize >= 32) { 3570 // Do the expansion with floating-point types, since that is what the VFP 3571 // registers are defined to use, and since i64 is not legal.
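// E.g., a v2i64 BUILD_VECTOR is rebuilt here as a v2f64 ARMISD::BUILD_VECTOR
// of bitconverted f64 operands and then bitconverted back to v2i64.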
3572 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3573 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3574 SmallVector<SDValue, 8> Ops; 3575 for (unsigned i = 0; i < NumElts; ++i) 3576 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i))); 3577 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3578 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3579 } 3580 3581 return SDValue(); 3582} 3583 3584/// isShuffleMaskLegal - Targets can use this to indicate that they only 3585/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3586/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3587/// are assumed to be legal. 3588bool 3589ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3590 EVT VT) const { 3591 if (VT.getVectorNumElements() == 4 && 3592 (VT.is128BitVector() || VT.is64BitVector())) { 3593 unsigned PFIndexes[4]; 3594 for (unsigned i = 0; i != 4; ++i) { 3595 if (M[i] < 0) 3596 PFIndexes[i] = 8; 3597 else 3598 PFIndexes[i] = M[i]; 3599 } 3600 3601 // Compute the index in the perfect shuffle table. 3602 unsigned PFTableIndex = 3603 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3604 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3605 unsigned Cost = (PFEntry >> 30); 3606 3607 if (Cost <= 4) 3608 return true; 3609 } 3610 3611 bool ReverseVEXT; 3612 unsigned Imm, WhichResult; 3613 3614 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3615 return (EltSize >= 32 || 3616 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3617 isVREVMask(M, VT, 64) || 3618 isVREVMask(M, VT, 32) || 3619 isVREVMask(M, VT, 16) || 3620 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3621 isVTRNMask(M, VT, WhichResult) || 3622 isVUZPMask(M, VT, WhichResult) || 3623 isVZIPMask(M, VT, WhichResult) || 3624 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3625 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3626 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3627} 3628 3629/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3630/// the specified operations to build the shuffle. 
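/// As decoded below, each 32-bit PFEntry packs a cost in bits [31:30]
/// (checked by the callers before getting here), an opcode in bits [29:26],
/// and 13-bit left/right operand IDs in bits [25:13] and [12:0], which are
/// themselves indices of table entries to expand recursively.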
3631static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3632 SDValue RHS, SelectionDAG &DAG, 3633 DebugLoc dl) { 3634 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3635 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3636 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3637 3638 enum { 3639 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3640 OP_VREV, 3641 OP_VDUP0, 3642 OP_VDUP1, 3643 OP_VDUP2, 3644 OP_VDUP3, 3645 OP_VEXT1, 3646 OP_VEXT2, 3647 OP_VEXT3, 3648 OP_VUZPL, // VUZP, left result 3649 OP_VUZPR, // VUZP, right result 3650 OP_VZIPL, // VZIP, left result 3651 OP_VZIPR, // VZIP, right result 3652 OP_VTRNL, // VTRN, left result 3653 OP_VTRNR // VTRN, right result 3654 }; 3655 3656 if (OpNum == OP_COPY) { 3657 if (LHSID == (1*9+2)*9+3) return LHS; 3658 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3659 return RHS; 3660 } 3661 3662 SDValue OpLHS, OpRHS; 3663 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3664 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3665 EVT VT = OpLHS.getValueType(); 3666 3667 switch (OpNum) { 3668 default: llvm_unreachable("Unknown shuffle opcode!"); 3669 case OP_VREV: 3670 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3671 case OP_VDUP0: 3672 case OP_VDUP1: 3673 case OP_VDUP2: 3674 case OP_VDUP3: 3675 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3676 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3677 case OP_VEXT1: 3678 case OP_VEXT2: 3679 case OP_VEXT3: 3680 return DAG.getNode(ARMISD::VEXT, dl, VT, 3681 OpLHS, OpRHS, 3682 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3683 case OP_VUZPL: 3684 case OP_VUZPR: 3685 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3686 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3687 case OP_VZIPL: 3688 case OP_VZIPR: 3689 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3690 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3691 case OP_VTRNL: 3692 case OP_VTRNR: 3693 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3694 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3695 } 3696} 3697 3698static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3699 SDValue V1 = Op.getOperand(0); 3700 SDValue V2 = Op.getOperand(1); 3701 DebugLoc dl = Op.getDebugLoc(); 3702 EVT VT = Op.getValueType(); 3703 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3704 SmallVector<int, 8> ShuffleMask; 3705 3706 // Convert shuffles that are directly supported on NEON to target-specific 3707 // DAG nodes, instead of keeping them as shuffles and matching them again 3708 // during code selection. This is more efficient and avoids the possibility 3709 // of inconsistencies between legalization and selection. 3710 // FIXME: floating-point vectors should be canonicalized to integer vectors 3711 // of the same size so that they get CSEd properly. 3712 SVN->getMask(ShuffleMask); 3713 3714 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3715 if (EltSize <= 32) { 3716 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 3717 int Lane = SVN->getSplatIndex(); 3718 // If this is an undef splat, generate it via "just" vdup, if possible.
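// (A splat index of -1 means every lane is undef, so any lane's value will
// do; lane 0 is used below.)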
3719 if (Lane == -1) Lane = 0; 3720 3721 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 3722 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 3723 } 3724 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 3725 DAG.getConstant(Lane, MVT::i32)); 3726 } 3727 3728 bool ReverseVEXT; 3729 unsigned Imm; 3730 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 3731 if (ReverseVEXT) 3732 std::swap(V1, V2); 3733 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 3734 DAG.getConstant(Imm, MVT::i32)); 3735 } 3736 3737 if (isVREVMask(ShuffleMask, VT, 64)) 3738 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 3739 if (isVREVMask(ShuffleMask, VT, 32)) 3740 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 3741 if (isVREVMask(ShuffleMask, VT, 16)) 3742 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 3743 3744 // Check for Neon shuffles that modify both input vectors in place. 3745 // If both results are used, i.e., if there are two shuffles with the same 3746 // source operands and with masks corresponding to both results of one of 3747 // these operations, DAG memoization will ensure that a single node is 3748 // used for both shuffles. 3749 unsigned WhichResult; 3750 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 3751 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3752 V1, V2).getValue(WhichResult); 3753 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 3754 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3755 V1, V2).getValue(WhichResult); 3756 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 3757 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3758 V1, V2).getValue(WhichResult); 3759 3760 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3761 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3762 V1, V1).getValue(WhichResult); 3763 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3764 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3765 V1, V1).getValue(WhichResult); 3766 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3767 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3768 V1, V1).getValue(WhichResult); 3769 } 3770 3771 // If the shuffle is not directly supported and it has 4 elements, use 3772 // the PerfectShuffle-generated table to synthesize it from other shuffles. 3773 unsigned NumElts = VT.getVectorNumElements(); 3774 if (NumElts == 4) { 3775 unsigned PFIndexes[4]; 3776 for (unsigned i = 0; i != 4; ++i) { 3777 if (ShuffleMask[i] < 0) 3778 PFIndexes[i] = 8; 3779 else 3780 PFIndexes[i] = ShuffleMask[i]; 3781 } 3782 3783 // Compute the index in the perfect shuffle table. 3784 unsigned PFTableIndex = 3785 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3786 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3787 unsigned Cost = (PFEntry >> 30); 3788 3789 if (Cost <= 4) 3790 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3791 } 3792 3793 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 3794 if (EltSize >= 32) { 3795 // Do the expansion with floating-point types, since that is what the VFP 3796 // registers are defined to use, and since i64 is not legal. 
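// E.g., an arbitrary v2f64 shuffle becomes two EXTRACT_VECTOR_ELTs feeding
// an ARMISD::BUILD_VECTOR, which the backend can implement with plain VFP
// register moves instead of a NEON permute.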
3797 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3798 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3799 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1); 3800 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2); 3801 SmallVector<SDValue, 8> Ops; 3802 for (unsigned i = 0; i < NumElts; ++i) { 3803 if (ShuffleMask[i] < 0) 3804 Ops.push_back(DAG.getUNDEF(EltVT)); 3805 else 3806 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3807 ShuffleMask[i] < (int)NumElts ? V1 : V2, 3808 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 3809 MVT::i32))); 3810 } 3811 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3812 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3813 } 3814 3815 return SDValue(); 3816} 3817 3818static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 3819 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 3820 SDValue Lane = Op.getOperand(1); 3821 if (!isa<ConstantSDNode>(Lane)) 3822 return SDValue(); 3823 3824 SDValue Vec = Op.getOperand(0); 3825 if (Op.getValueType() == MVT::i32 && 3826 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 3827 DebugLoc dl = Op.getDebugLoc(); 3828 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 3829 } 3830 3831 return Op; 3832} 3833 3834static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 3835 // The only time a CONCAT_VECTORS operation can have legal types is when 3836 // two 64-bit vectors are concatenated to a 128-bit vector. 3837 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 3838 "unexpected CONCAT_VECTORS"); 3839 DebugLoc dl = Op.getDebugLoc(); 3840 SDValue Val = DAG.getUNDEF(MVT::v2f64); 3841 SDValue Op0 = Op.getOperand(0); 3842 SDValue Op1 = Op.getOperand(1); 3843 if (Op0.getOpcode() != ISD::UNDEF) 3844 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3845 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0), 3846 DAG.getIntPtrConstant(0)); 3847 if (Op1.getOpcode() != ISD::UNDEF) 3848 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3849 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1), 3850 DAG.getIntPtrConstant(1)); 3851 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val); 3852} 3853 3854/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or 3855/// an extending load, return the unextended value. 3856static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 3857 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 3858 return N->getOperand(0); 3859 LoadSDNode *LD = cast<LoadSDNode>(N); 3860 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 3861 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 3862 LD->isNonTemporal(), LD->getAlignment()); 3863} 3864 3865static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 3866 // Multiplications are only custom-lowered for 128-bit vectors so that 3867 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
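// E.g., (mul (sext v2i32:x), (sext v2i32:y)) : v2i64 is the pattern matched
// below: the extensions are stripped and a single ARMISD::VMULLs (vmull.s32)
// is emitted instead of an expanded 64-bit multiply.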
3868 EVT VT = Op.getValueType(); 3869 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 3870 SDNode *N0 = Op.getOperand(0).getNode(); 3871 SDNode *N1 = Op.getOperand(1).getNode(); 3872 unsigned NewOpc = 0; 3873 if ((N0->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N0)) && 3874 (N1->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N1))) { 3875 NewOpc = ARMISD::VMULLs; 3876 } else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) && 3877 (N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) { 3878 NewOpc = ARMISD::VMULLu; 3879 } else if (VT == MVT::v2i64) { 3880 // Fall through to expand this. It is not legal. 3881 return SDValue(); 3882 } else { 3883 // Other vector multiplications are legal. 3884 return Op; 3885 } 3886 3887 // Legalize to a VMULL instruction. 3888 DebugLoc DL = Op.getDebugLoc(); 3889 SDValue Op0 = SkipExtension(N0, DAG); 3890 SDValue Op1 = SkipExtension(N1, DAG); 3891 3892 assert(Op0.getValueType().is64BitVector() && 3893 Op1.getValueType().is64BitVector() && 3894 "unexpected types for extended operands to VMULL"); 3895 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 3896} 3897 3898SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3899 switch (Op.getOpcode()) { 3900 default: llvm_unreachable("Don't know how to custom lower this!"); 3901 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3902 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 3903 case ISD::GlobalAddress: 3904 return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) : 3905 LowerGlobalAddressELF(Op, DAG); 3906 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3907 case ISD::SELECT: return LowerSELECT(Op, DAG); 3908 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 3909 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 3910 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 3911 case ISD::VASTART: return LowerVASTART(Op, DAG); 3912 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 3913 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 3914 case ISD::SINT_TO_FP: 3915 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 3916 case ISD::FP_TO_SINT: 3917 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 3918 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 3919 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3920 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3921 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 3922 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 3923 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 3924 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 3925 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 3926 Subtarget); 3927 case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG); 3928 case ISD::SHL: 3929 case ISD::SRL: 3930 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 3931 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 3932 case ISD::SRL_PARTS: 3933 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 3934 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 3935 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 3936 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 3937 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3938 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 3939 case ISD::CONCAT_VECTORS: 
return LowerCONCAT_VECTORS(Op, DAG); 3940 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 3941 case ISD::MUL: return LowerMUL(Op, DAG); 3942 } 3943 return SDValue(); 3944} 3945 3946/// ReplaceNodeResults - Replace the results of node with an illegal result 3947/// type with new values built out of custom code. 3948void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 3949 SmallVectorImpl<SDValue>&Results, 3950 SelectionDAG &DAG) const { 3951 SDValue Res; 3952 switch (N->getOpcode()) { 3953 default: 3954 llvm_unreachable("Don't know how to custom expand this!"); 3955 break; 3956 case ISD::BIT_CONVERT: 3957 Res = ExpandBIT_CONVERT(N, DAG); 3958 break; 3959 case ISD::SRL: 3960 case ISD::SRA: 3961 Res = LowerShift(N, DAG, Subtarget); 3962 break; 3963 } 3964 if (Res.getNode()) 3965 Results.push_back(Res); 3966} 3967 3968//===----------------------------------------------------------------------===// 3969// ARM Scheduler Hooks 3970//===----------------------------------------------------------------------===// 3971 3972MachineBasicBlock * 3973ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 3974 MachineBasicBlock *BB, 3975 unsigned Size) const { 3976 unsigned dest = MI->getOperand(0).getReg(); 3977 unsigned ptr = MI->getOperand(1).getReg(); 3978 unsigned oldval = MI->getOperand(2).getReg(); 3979 unsigned newval = MI->getOperand(3).getReg(); 3980 unsigned scratch = BB->getParent()->getRegInfo() 3981 .createVirtualRegister(ARM::GPRRegisterClass); 3982 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3983 DebugLoc dl = MI->getDebugLoc(); 3984 bool isThumb2 = Subtarget->isThumb2(); 3985 3986 unsigned ldrOpc, strOpc; 3987 switch (Size) { 3988 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 3989 case 1: 3990 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 3991 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 3992 break; 3993 case 2: 3994 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 3995 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 3996 break; 3997 case 4: 3998 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 3999 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4000 break; 4001 } 4002 4003 MachineFunction *MF = BB->getParent(); 4004 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4005 MachineFunction::iterator It = BB; 4006 ++It; // insert the new blocks after the current block 4007 4008 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4009 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4010 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4011 MF->insert(It, loop1MBB); 4012 MF->insert(It, loop2MBB); 4013 MF->insert(It, exitMBB); 4014 4015 // Transfer the remainder of BB and its successor edges to exitMBB. 4016 exitMBB->splice(exitMBB->begin(), BB, 4017 llvm::next(MachineBasicBlock::iterator(MI)), 4018 BB->end()); 4019 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4020 4021 // thisMBB: 4022 // ... 4023 // fallthrough --> loop1MBB 4024 BB->addSuccessor(loop1MBB); 4025 4026 // loop1MBB: 4027 // ldrex dest, [ptr] 4028 // cmp dest, oldval 4029 // bne exitMBB 4030 BB = loop1MBB; 4031 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4032 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4033 .addReg(dest).addReg(oldval)); 4034 BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2Bcc : ARM::Bcc)) 4035 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4036 BB->addSuccessor(loop2MBB); 4037 BB->addSuccessor(exitMBB); 4038 4039 // loop2MBB: 4040 // strex scratch, newval, [ptr] 4041 // cmp scratch, #0 4042 // bne loop1MBB 4043 BB = loop2MBB; 4044 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 4045 .addReg(ptr)); 4046 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4047 .addReg(scratch).addImm(0)); 4048 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4049 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4050 BB->addSuccessor(loop1MBB); 4051 BB->addSuccessor(exitMBB); 4052 4053 // exitMBB: 4054 // ... 4055 BB = exitMBB; 4056 4057 MI->eraseFromParent(); // The instruction is gone now. 4058 4059 return BB; 4060} 4061 4062MachineBasicBlock * 4063ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4064 unsigned Size, unsigned BinOpcode) const { 4065 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4066 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4067 4068 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4069 MachineFunction *MF = BB->getParent(); 4070 MachineFunction::iterator It = BB; 4071 ++It; 4072 4073 unsigned dest = MI->getOperand(0).getReg(); 4074 unsigned ptr = MI->getOperand(1).getReg(); 4075 unsigned incr = MI->getOperand(2).getReg(); 4076 DebugLoc dl = MI->getDebugLoc(); 4077 4078 bool isThumb2 = Subtarget->isThumb2(); 4079 unsigned ldrOpc, strOpc; 4080 switch (Size) { 4081 default: llvm_unreachable("unsupported size for AtomicBinary!"); 4082 case 1: 4083 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4084 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4085 break; 4086 case 2: 4087 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4088 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4089 break; 4090 case 4: 4091 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4092 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4093 break; 4094 } 4095 4096 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4097 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4098 MF->insert(It, loopMBB); 4099 MF->insert(It, exitMBB); 4100 4101 // Transfer the remainder of BB and its successor edges to exitMBB. 4102 exitMBB->splice(exitMBB->begin(), BB, 4103 llvm::next(MachineBasicBlock::iterator(MI)), 4104 BB->end()); 4105 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4106 4107 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4108 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4109 unsigned scratch2 = (!BinOpcode) ? incr : 4110 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4111 4112 // thisMBB: 4113 // ... 4114 // fallthrough --> loopMBB 4115 BB->addSuccessor(loopMBB); 4116 4117 // loopMBB: 4118 // ldrex dest, ptr 4119 // <binop> scratch2, dest, incr 4120 // strex scratch, scratch2, ptr 4121 // cmp scratch, #0 4122 // bne- loopMBB 4123 // fallthrough --> exitMBB 4124 BB = loopMBB; 4125 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4126 if (BinOpcode) { 4127 // operand order needs to go the other way for NAND 4128 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 4129 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4130 addReg(incr).addReg(dest)).addReg(0); 4131 else 4132 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4133 addReg(dest).addReg(incr)).addReg(0); 4134 } 4135 4136 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4137 .addReg(ptr)); 4138 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4139 .addReg(scratch).addImm(0)); 4140 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4141 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4142 4143 BB->addSuccessor(loopMBB); 4144 BB->addSuccessor(exitMBB); 4145 4146 // exitMBB: 4147 // ... 4148 BB = exitMBB; 4149 4150 MI->eraseFromParent(); // The instruction is gone now. 4151 4152 return BB; 4153} 4154 4155static 4156MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4157 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4158 E = MBB->succ_end(); I != E; ++I) 4159 if (*I != Succ) 4160 return *I; 4161 llvm_unreachable("Expecting a BB with two successors!"); 4162} 4163 4164MachineBasicBlock * 4165ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4166 MachineBasicBlock *BB) const { 4167 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4168 DebugLoc dl = MI->getDebugLoc(); 4169 bool isThumb2 = Subtarget->isThumb2(); 4170 switch (MI->getOpcode()) { 4171 default: 4172 MI->dump(); 4173 llvm_unreachable("Unexpected instr type to insert"); 4174 4175 case ARM::ATOMIC_LOAD_ADD_I8: 4176 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4177 case ARM::ATOMIC_LOAD_ADD_I16: 4178 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4179 case ARM::ATOMIC_LOAD_ADD_I32: 4180 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4181 4182 case ARM::ATOMIC_LOAD_AND_I8: 4183 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4184 case ARM::ATOMIC_LOAD_AND_I16: 4185 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4186 case ARM::ATOMIC_LOAD_AND_I32: 4187 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4188 4189 case ARM::ATOMIC_LOAD_OR_I8: 4190 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4191 case ARM::ATOMIC_LOAD_OR_I16: 4192 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4193 case ARM::ATOMIC_LOAD_OR_I32: 4194 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4195 4196 case ARM::ATOMIC_LOAD_XOR_I8: 4197 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4198 case ARM::ATOMIC_LOAD_XOR_I16: 4199 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4200 case ARM::ATOMIC_LOAD_XOR_I32: 4201 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4202 4203 case ARM::ATOMIC_LOAD_NAND_I8: 4204 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4205 case ARM::ATOMIC_LOAD_NAND_I16: 4206 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4207 case ARM::ATOMIC_LOAD_NAND_I32: 4208 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4209 4210 case ARM::ATOMIC_LOAD_SUB_I8: 4211 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4212 case ARM::ATOMIC_LOAD_SUB_I16: 4213 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4214 case ARM::ATOMIC_LOAD_SUB_I32: 4215 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 4216 4217 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4218 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4219 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4220 4221 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4222 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4223 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4224 4225 case ARM::tMOVCCr_pseudo: { 4226 // To "insert" a SELECT_CC instruction, we actually have to insert the 4227 // diamond control-flow pattern. The incoming instruction knows the 4228 // destination vreg to set, the condition code register to branch on, the 4229 // true/false values to select between, and a branch opcode to use. 4230 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4231 MachineFunction::iterator It = BB; 4232 ++It; 4233 4234 // thisMBB: 4235 // ... 4236 // TrueVal = ... 4237 // cmpTY ccX, r1, r2 4238 // bCC copy1MBB 4239 // fallthrough --> copy0MBB 4240 MachineBasicBlock *thisMBB = BB; 4241 MachineFunction *F = BB->getParent(); 4242 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4243 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4244 F->insert(It, copy0MBB); 4245 F->insert(It, sinkMBB); 4246 4247 // Transfer the remainder of BB and its successor edges to sinkMBB. 4248 sinkMBB->splice(sinkMBB->begin(), BB, 4249 llvm::next(MachineBasicBlock::iterator(MI)), 4250 BB->end()); 4251 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4252 4253 BB->addSuccessor(copy0MBB); 4254 BB->addSuccessor(sinkMBB); 4255 4256 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4257 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4258 4259 // copy0MBB: 4260 // %FalseValue = ... 4261 // # fallthrough to sinkMBB 4262 BB = copy0MBB; 4263 4264 // Update machine-CFG edges 4265 BB->addSuccessor(sinkMBB); 4266 4267 // sinkMBB: 4268 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4269 // ... 4270 BB = sinkMBB; 4271 BuildMI(*BB, BB->begin(), dl, 4272 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4273 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4274 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4275 4276 MI->eraseFromParent(); // The pseudo instruction is gone now. 4277 return BB; 4278 } 4279 4280 case ARM::BCCi64: 4281 case ARM::BCCZi64: { 4282 // Compare both parts that make up the double comparison separately for 4283 // equality. 4284 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4285 4286 unsigned LHS1 = MI->getOperand(1).getReg(); 4287 unsigned LHS2 = MI->getOperand(2).getReg(); 4288 if (RHSisZero) { 4289 AddDefaultPred(BuildMI(BB, dl, 4290 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4291 .addReg(LHS1).addImm(0)); 4292 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4293 .addReg(LHS2).addImm(0) 4294 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4295 } else { 4296 unsigned RHS1 = MI->getOperand(3).getReg(); 4297 unsigned RHS2 = MI->getOperand(4).getReg(); 4298 AddDefaultPred(BuildMI(BB, dl, 4299 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4300 .addReg(LHS1).addReg(RHS1)); 4301 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4302 .addReg(LHS2).addReg(RHS2) 4303 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4304 } 4305 4306 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 
3 : 5).getMBB(); 4307 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4308 if (MI->getOperand(0).getImm() == ARMCC::NE) 4309 std::swap(destMBB, exitMBB); 4310 4311 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4312 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4313 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4314 .addMBB(exitMBB); 4315 4316 MI->eraseFromParent(); // The pseudo instruction is gone now. 4317 return BB; 4318 } 4319 } 4320} 4321 4322//===----------------------------------------------------------------------===// 4323// ARM Optimization Hooks 4324//===----------------------------------------------------------------------===// 4325 4326static 4327SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 4328 TargetLowering::DAGCombinerInfo &DCI) { 4329 SelectionDAG &DAG = DCI.DAG; 4330 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4331 EVT VT = N->getValueType(0); 4332 unsigned Opc = N->getOpcode(); 4333 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 4334 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 4335 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 4336 ISD::CondCode CC = ISD::SETCC_INVALID; 4337 4338 if (isSlctCC) { 4339 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 4340 } else { 4341 SDValue CCOp = Slct.getOperand(0); 4342 if (CCOp.getOpcode() == ISD::SETCC) 4343 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 4344 } 4345 4346 bool DoXform = false; 4347 bool InvCC = false; 4348 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 4349 "Bad input!"); 4350 4351 if (LHS.getOpcode() == ISD::Constant && 4352 cast<ConstantSDNode>(LHS)->isNullValue()) { 4353 DoXform = true; 4354 } else if (CC != ISD::SETCC_INVALID && 4355 RHS.getOpcode() == ISD::Constant && 4356 cast<ConstantSDNode>(RHS)->isNullValue()) { 4357 std::swap(LHS, RHS); 4358 SDValue Op0 = Slct.getOperand(0); 4359 EVT OpVT = isSlctCC ? Op0.getValueType() : 4360 Op0.getOperand(0).getValueType(); 4361 bool isInt = OpVT.isInteger(); 4362 CC = ISD::getSetCCInverse(CC, isInt); 4363 4364 if (!TLI.isCondCodeLegal(CC, OpVT)) 4365 return SDValue(); // Inverse operator isn't legal. 4366 4367 DoXform = true; 4368 InvCC = true; 4369 } 4370 4371 if (DoXform) { 4372 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 4373 if (isSlctCC) 4374 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 4375 Slct.getOperand(0), Slct.getOperand(1), CC); 4376 SDValue CCOp = Slct.getOperand(0); 4377 if (InvCC) 4378 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 4379 CCOp.getOperand(0), CCOp.getOperand(1), CC); 4380 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4381 CCOp, OtherOp, Result); 4382 } 4383 return SDValue(); 4384} 4385 4386/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 4387/// operands N0 and N1. This is a helper for PerformADDCombine that is 4388/// called with the default operands, and if that fails, with commuted 4389/// operands. 4390static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 4391 TargetLowering::DAGCombinerInfo &DCI) { 4392 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c)) 4393 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 4394 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 4395 if (Result.getNode()) return Result; 4396 } 4397 return SDValue(); 4398} 4399 4400/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
4401/// 4402static SDValue PerformADDCombine(SDNode *N, 4403 TargetLowering::DAGCombinerInfo &DCI) { 4404 SDValue N0 = N->getOperand(0); 4405 SDValue N1 = N->getOperand(1); 4406 4407 // First try with the default operand order. 4408 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 4409 if (Result.getNode()) 4410 return Result; 4411 4412 // If that didn't work, try again with the operands commuted. 4413 return PerformADDCombineWithOperands(N, N1, N0, DCI); 4414} 4415 4416/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 4417/// 4418static SDValue PerformSUBCombine(SDNode *N, 4419 TargetLowering::DAGCombinerInfo &DCI) { 4420 SDValue N0 = N->getOperand(0); 4421 SDValue N1 = N->getOperand(1); 4422 4423 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 4424 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 4425 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 4426 if (Result.getNode()) return Result; 4427 } 4428 4429 return SDValue(); 4430} 4431 4432static SDValue PerformMULCombine(SDNode *N, 4433 TargetLowering::DAGCombinerInfo &DCI, 4434 const ARMSubtarget *Subtarget) { 4435 SelectionDAG &DAG = DCI.DAG; 4436 4437 if (Subtarget->isThumb1Only()) 4438 return SDValue(); 4439 4440 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 4441 return SDValue(); 4442 4443 EVT VT = N->getValueType(0); 4444 if (VT != MVT::i32) 4445 return SDValue(); 4446 4447 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4448 if (!C) 4449 return SDValue(); 4450 4451 uint64_t MulAmt = C->getZExtValue(); 4452 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 4453 ShiftAmt = ShiftAmt & (32 - 1); 4454 SDValue V = N->getOperand(0); 4455 DebugLoc DL = N->getDebugLoc(); 4456 4457 SDValue Res; 4458 MulAmt >>= ShiftAmt; 4459 if (isPowerOf2_32(MulAmt - 1)) { 4460 // (mul x, 2^N + 1) => (add (shl x, N), x) 4461 Res = DAG.getNode(ISD::ADD, DL, VT, 4462 V, DAG.getNode(ISD::SHL, DL, VT, 4463 V, DAG.getConstant(Log2_32(MulAmt-1), 4464 MVT::i32))); 4465 } else if (isPowerOf2_32(MulAmt + 1)) { 4466 // (mul x, 2^N - 1) => (sub (shl x, N), x) 4467 Res = DAG.getNode(ISD::SUB, DL, VT, 4468 DAG.getNode(ISD::SHL, DL, VT, 4469 V, DAG.getConstant(Log2_32(MulAmt+1), 4470 MVT::i32)), 4471 V); 4472 } else 4473 return SDValue(); 4474 4475 if (ShiftAmt != 0) 4476 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 4477 DAG.getConstant(ShiftAmt, MVT::i32)); 4478 4479 // Do not add new nodes to DAG combiner worklist. 

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VbicVT;
      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BIT_CONVERT, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vbic);
      }
    }
  }

  return SDValue();
}
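
// For illustration: an AND of a v4i32 vector with a splat of 0xffffff00
// inverts to the modified immediate 0x000000ff, so the whole operation can
// be emitted as a single "vbic.i32 q0, #0xff" instead of materializing the
// mask in a register.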

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VorrVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VorrVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BIT_CONVERT, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vorr);
      }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.

  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && CountPopulation_32(mask) == CountPopulation_32(~mask2)
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && CountPopulation_32(mask) == CountPopulation_32(~mask2)
  // (i.e., copy a bitfield value into another bitfield of the same width)
  if (N0.getOpcode() != ISD::AND)
    return SDValue();

  if (VT != MVT::i32)
    return SDValue();

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set.  If the mask is 0xffff, we can do better via a
  // movt instruction, so don't use BFI in that case.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!C)
    return SDValue();
  unsigned Mask = C->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  if ((C = dyn_cast<ConstantSDNode>(N1))) {
    unsigned Val = C->getZExtValue();
    if (!ARM::isBitFieldInvertedMask(Mask) || (Val & ~Mask) != Val)
      return SDValue();
    Val >>= CountTrailingZeros_32(~Mask);

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0),
                      DAG.getConstant(Val, MVT::i32),
                      DAG.getConstant(Mask, MVT::i32));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, Res, false);
  } else if (N1.getOpcode() == ISD::AND) {
    // Case (2): or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!C)
      return SDValue();
    unsigned Mask2 = C->getZExtValue();

    if (ARM::isBitFieldInvertedMask(Mask) &&
        ARM::isBitFieldInvertedMask(~Mask2) &&
        (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasT2ExtractPack() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned lsb = CountTrailingZeros_32(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(lsb, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), Res,
                        DAG.getConstant(Mask, MVT::i32));
      // Do not add new nodes to DAG combiner worklist.
      DCI.CombineTo(N, Res, false);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               ARM::isBitFieldInvertedMask(Mask2) &&
               (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasT2ExtractPack() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = CountTrailingZeros_32(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
                        DAG.getConstant(lsb, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, MVT::i32));
      // Do not add new nodes to DAG combiner worklist.
      DCI.CombineTo(N, Res, false);
    }
  }

  return SDValue();
}
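
// For illustration of case (1): "or (and A, 0xffffff00), 0x000000ab" is a
// bitfield set of the low byte, so it becomes an ARMISD::BFI node that
// inserts the 8-bit value 0xab into bits [7:0] of A; 0xffffff00 passes the
// isBitFieldInvertedMask check because its zero bits are contiguous.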

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BIT_CONVERT)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BIT_CONVERT)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
/// ISD::BUILD_VECTOR.
static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG) {
  // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
  // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
  // into a pair of GPRs, which is fine when the value is used as a scalar,
  // but if the i64 value is converted to a vector, we need to undo the
  // VMOVRRD.
  if (N->getNumOperands() == 2)
    return PerformVMOVDRRCombine(N, DAG);

  return SDValue();
}
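
// For illustration: when an f64 X has been split as "lo, hi = VMOVRRD(X)"
// and the two halves are then reassembled with VMOVDRR (or a two-operand
// BUILD_VECTOR), the round trip through GPRs is dropped and replaced with a
// plain bit_convert of X to the requested type.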

/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors.  That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector.  Do that
  // transformation here:
  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
  //   shuffle(concat(v1, v2), undef)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
      Op0.getNumOperands() != 2 ||
      Op1.getNumOperands() != 2)
    return SDValue();
  SDValue Concat0Op1 = Op0.getOperand(1);
  SDValue Concat1Op1 = Op1.getOperand(1);
  if (Concat0Op1.getOpcode() != ISD::UNDEF ||
      Concat1Op1.getOpcode() != ISD::UNDEF)
    return SDValue();
  // Skip the transformation if any of the types are illegal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isTypeLegal(VT) ||
      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
      !TLI.isTypeLegal(Concat1Op1.getValueType()))
    return SDValue();

  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
                                  Op0.getOperand(0), Op1.getOperand(0));
  // Translate the shuffle mask.
  SmallVector<int, 16> NewMask;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfElts = NumElts/2;
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  for (unsigned n = 0; n < NumElts; ++n) {
    int MaskElt = SVN->getMaskElt(n);
    int NewElt = -1;
    if (MaskElt < (int)HalfElts)
      NewElt = MaskElt;
    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
      NewElt = HalfElts + MaskElt - NumElts;
    NewMask.push_back(NewElt);
  }
  return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat,
                              DAG.getUNDEF(VT), NewMask.data());
}
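
// For illustration: with v8i16 and two v4i16 inputs, mask entries 0..3 (the
// low half of the first concat) are kept as-is, and an entry such as 9
// (element 1 of the second concat) is remapped to HalfElts + 9 - 8 = 5, its
// position within the single concat(v1, v2) operand; any entry referring to
// an undef half becomes -1 (undef).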

/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.
  SDValue Op = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BIT_CONVERT)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
    EltSize = 8;
  if (EltSize > VT.getVectorElementType().getSizeInBits())
    return SDValue();

  return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BIT_CONVERT)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative.  The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (isIntrinsic)
    Cnt = -Cnt;
  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
}
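
// For illustration: the NEON intrinsics encode an immediate right shift as a
// splatted negative count, so "vshifts(x, <v8i8 splat -3>)" is matched here
// with Cnt = 3 and lowered to an ARMISD::VSHRs node, which selects to
// "vshr.s8 d0, d1, #3".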

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized
  // to loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vshiftls:
  case Intrinsic::arm_neon_vshiftlu:
  case Intrinsic::arm_neon_vshiftn:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for vshll intrinsic");

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vshiftn:
    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (Cnt == VT.getVectorElementType().getSizeInBits())
        VShiftOpc = ARMISD::VSHLLi;
      else
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
                     ARMISD::VSHLLs : ARMISD::VSHLLu);
      break;
    case Intrinsic::arm_neon_vshiftn:
      VShiftOpc = ARMISD::VSHRN; break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);

  // Nothing to be done for scalar shifts.
  if (!VT.isVector())
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    }
  }
  return SDValue();
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8-
  // and 16-bit vector elements.  NEON supports these directly.  They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
    }
  }

  return SDValue();
}
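
// For illustration: "sext(extract_vector_elt v8i16 %v, 3)" to i32 becomes a
// single ARMISD::VGETLANEs node, which can select to something like
// "vmov.s16 r0, d0[3]", extracting and sign-extending the lane in one
// instruction.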

/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
/// to match f32 max/min patterns to use NEON vmax/vmin instructions.
static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  // If the target supports NEON, try to use vmax/vmin instructions for f32
  // selects like "x < y ? x : y".  Unless the NoNaNsFPMath option is set, be
  // careful about NaNs: NEON's vmax/vmin return NaN if either operand is a
  // NaN; only do the transformation when it matches that behavior.

  // For now only do this when using NEON for FP operations; if using VFP, it
  // is not obvious that the benefit outweighs the cost of switching to the
  // NEON pipeline.
  if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
      N->getValueType(0) != MVT::f32)
    return SDValue();

  SDValue CondLHS = N->getOperand(0);
  SDValue CondRHS = N->getOperand(1);
  SDValue LHS = N->getOperand(2);
  SDValue RHS = N->getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();

  unsigned Opcode = 0;
  bool IsReversed;
  if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
    IsReversed = false; // x CC y ? x : y
  } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
    IsReversed = true;  // x CC y ? y : x
  } else {
    return SDValue();
  }

  bool IsUnordered;
  switch (CC) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETULT:
  case ISD::SETULE:
    // If LHS is NaN, an ordered comparison will be false and the result will
    // be the RHS, but vmin(NaN, RHS) = NaN.  Avoid this by checking that LHS
    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
    IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
      break;
    // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
    // will return -0, so vmin can only be used for unsafe math or if one of
    // the operands is known to be nonzero.
    if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
        !UnsafeFPMath &&
        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
      break;
    Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
    break;

  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    // If LHS is NaN, an ordered comparison will be false and the result will
    // be the RHS, but vmax(NaN, RHS) = NaN.  Avoid this by checking that LHS
    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
    IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
      break;
    // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
    // will return +0, so vmax can only be used for unsafe math or if one of
    // the operands is known to be nonzero.
    if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
        !UnsafeFPMath &&
        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
      break;
    Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
    break;
  }

  if (!Opcode)
    return SDValue();
  return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
}
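
// For illustration: "x < y ? x : y" (select_cc x, y, x, y, setolt) maps to
// FMIN when x is known never to be NaN, because if x were NaN the select
// would pick y while vmin would return NaN.  The strict "<" also sidesteps
// the "+0 <= -0" signed-zero hazard described above.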

SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:        return PerformADDCombine(N, DCI);
  case ISD::SUB:        return PerformSUBCombine(N, DCI);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
  case ISD::AND:        return PerformANDCombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI.DAG);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI.DAG);
  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::SELECT_CC:  return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
  }
  return SDValue();
}

bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
  if (!Subtarget->allowsUnalignedMem())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    return true;
  // FIXME: VLD1 etc with standard alignment is legal.
  }
}

static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    // Scale == 1.
    break;
  case MVT::i16:
    // Scale == 2.
    Scale = 2;
    break;
  case MVT::i32:
    // Scale == 4.
    Scale = 4;
    break;
  }

  if ((V & (Scale - 1)) != 0)
    return false;
  V /= Scale;
  return V == (V & ((1LL << 5) - 1));
}

static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  bool isNeg = false;
  if (V < 0) {
    isNeg = true;
    V = -V;
  }

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // + imm12 or - imm8
    if (isNeg)
      return V == (V & ((1LL << 8) - 1));
    return V == (V & ((1LL << 12) - 1));
  case MVT::f32:
  case MVT::f64:
    // Same as ARM mode. FIXME: NEON?
    if (!Subtarget->hasVFP2())
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = -V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2()) // FIXME: NEON?
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // r + r
    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations.  This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}
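
// For illustration: a Thumb1 i32 load uses a 5-bit offset scaled by 4, so
// isLegalT1AddressImmediate accepts 0, 4, ..., 124 but rejects 3 (misaligned)
// and 128 (out of range), while ARM mode accepts any i32 offset in
// [-4095, 4095] and Thumb2 accepts +imm12 or -imm8.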

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  EVT VT = getValueType(Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb1Only())
      return false;
    // FALL THROUGH.
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r + r
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
    break;
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  return Imm >= 0 && Imm <= 255;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-indexed loads / stores when
    // it's legal.  In Thumb2 mode, the offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed loads / stores update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}
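
// For illustration: these hooks let "ldr r0, [r1, #4]!" (pre-indexed, base
// updated before the access) and "ldr r0, [r1], #4" (post-indexed, base
// updated after) absorb the pointer increment of a load inside a loop, so
// no separate "add r1, r1, #4" is needed.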

void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne  &= KnownOneRHS;
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  const Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
      if (Subtarget->isThumb())
        return std::make_pair(0U, ARM::tGPRRegisterClass);
      else
        return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, ARM::QPRRegisterClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
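
// For illustration: in GCC-style inline assembly,
//   asm("add %0, %1, %2" : "=l"(r) : "l"(a), "l"(b));
// uses the 'l' constraint to request Thumb low registers (r0-r7), while
// "w"(x) would request a VFP/NEON register for a floating-point operand.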

std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 0);
  case 'r':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                   ARM::S12, ARM::S13, ARM::S14, ARM::S15,
                                   ARM::S16, ARM::S17, ARM::S18, ARM::S19,
                                   ARM::S20, ARM::S21, ARM::S22, ARM::S23,
                                   ARM::S24, ARM::S25, ARM::S26, ARM::S27,
                                   ARM::S28, ARM::S29, ARM::S30, ARM::S31, 0);
    if (VT.getSizeInBits() == 64)
      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
                                   ARM::D8, ARM::D9, ARM::D10, ARM::D11,
                                   ARM::D12, ARM::D13, ARM::D14, ARM::D15, 0);
    if (VT.getSizeInBits() == 128)
      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
    break;
  }

  return std::vector<unsigned>();
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (Constraint) {
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between -255 and -1, for negated ADD
          // immediates.  This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions.  It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095.  It is not clear
          // what this constraint is intended for.  Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value.  Exclude
          // zero to match GCC.  This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction.  This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions.  It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction.  This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions.  It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction.  This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions.  It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction.  This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions.  It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}
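
// For illustration: in ARM mode the 'I' constraint accepts any value that
// getSOImmVal can encode, i.e. an 8-bit value rotated right by an even
// amount, so 0xff000000 and 0x000003fc are accepted while 0x101 is not and
// would force the operand into a register.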

int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;
  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
  unsigned int lsb = 0, msb = 31;
  while (v & (1 << msb)) --msb;
  while (v & (1 << lsb)) ++lsb;
  for (unsigned int i = lsb; i <= msb; ++i) {
    if (v & (1 << i))
      return false;
  }
  return true;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively.  If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}
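
// For illustration: 1.0f is 0x3f800000, so Sign = 0, Exp = 0 and the kept
// mantissa bits are 0.  Exp encodes as ((0+3) & 7) ^ 4 = 7, giving the 8-bit
// VFP immediate 0x70, which is what "vmov.f32 s0, #1.0" encodes.  A value
// like 0.1f fails the mantissa check and returns -1, forcing a constant-pool
// load instead.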

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      const Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}
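
// For illustration: a vld2.v8i8 intrinsic returns a pair of <8 x i8> vectors,
// 16 bytes in total, so NumElts = 16 / 8 = 2 and memVT is set to v2i64, wide
// enough that alias analysis treats the whole 16-byte region as accessed.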