ARMISelLowering.cpp revision 44ab89eb376af838d1123293a79975aede501464
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
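// The cl::opt flags below are developer knobs registered with LLVM's
// CommandLine library, so they surface as llc options, e.g. (illustrative
// invocation): llc -march=arm -arm-tail-calls foo.ll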
// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }
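  // As a worked example of the promotions above: with the arguments
  // addDRTypeForNEON passes below, an AND of two v8i8 values is rewritten to
  // operate on v2i32, so one D-register bitwise instruction covers every
  // 64-bit vector type.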
  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
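      // A note on the pattern above: each __*vfp comparison helper returns a
      // nonzero i32 when the tested relation holds, and setCmpLibcallCC
      // records how that result is interpreted; marking OEQ as SETNE means
      // "the operands compare equal iff the helper's return value is nonzero".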
      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
"__aeabi_fdiv"); 280 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 281 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 282 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 283 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 284 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 285 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 286 287 // Single-precision floating-point comparison helper functions 288 // RTABI chapter 4.1.2, Table 5 289 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 290 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 291 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 292 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 293 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 294 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 295 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 296 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 297 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 298 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 299 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 300 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 301 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 302 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 303 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 304 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 305 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 306 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 307 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 308 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 309 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 310 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 311 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 312 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 313 314 // Floating-point to integer conversions. 315 // RTABI chapter 4.1.2, Table 6 316 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 317 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 318 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 319 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 320 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 321 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 322 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 323 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 324 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 325 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 326 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 327 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 328 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 329 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 330 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 331 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 332 333 // Conversions between floating types. 334 // RTABI chapter 4.1.2, Table 7 335 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 336 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 337 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 339 340 // Integer to floating-point conversions. 
    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64,  "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64,  "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64,  "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
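  // "Indexed" refers to the pre- and post-indexed addressing modes, e.g.
  //   ldr r0, [r1, #4]!   @ pre-indexed: r1 is updated before the access
  //   ldr r0, [r1], #4    @ post-indexed: r1 is updated after the access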
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now)
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);
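  // When an atomic node is marked Expand, the legalizer emits a call to the
  // matching __sync_* runtime helper; e.g. a 32-bit ATOMIC_CMP_SWAP becomes
  // a call to __sync_val_compare_and_swap_4.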
  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops())
    setTargetDAGCombine(ISD::OR);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
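  // With maxStoresPerMemcpy == 1, only a memcpy small enough for a single
  // store is expanded inline; larger copies become calls to memcpy.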
  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
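  // The cost is in units of the representative class, i.e. in D registers:
  // a v2f64 occupies one Q register, which aliases two D registers, hence
  // Cost = 2; the illegal v4i64 and v8i64 types below span 4 and 8.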
"ARMISD::VSHL"; 779 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 780 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 781 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 782 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 783 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 784 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 785 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 786 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 787 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 788 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 789 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 790 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 791 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 792 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 793 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 794 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 795 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 796 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 797 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 798 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 799 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 800 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 801 case ARMISD::VDUP: return "ARMISD::VDUP"; 802 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 803 case ARMISD::VEXT: return "ARMISD::VEXT"; 804 case ARMISD::VREV64: return "ARMISD::VREV64"; 805 case ARMISD::VREV32: return "ARMISD::VREV32"; 806 case ARMISD::VREV16: return "ARMISD::VREV16"; 807 case ARMISD::VZIP: return "ARMISD::VZIP"; 808 case ARMISD::VUZP: return "ARMISD::VUZP"; 809 case ARMISD::VTRN: return "ARMISD::VTRN"; 810 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 811 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 812 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 813 case ARMISD::FMAX: return "ARMISD::FMAX"; 814 case ARMISD::FMIN: return "ARMISD::FMIN"; 815 case ARMISD::BFI: return "ARMISD::BFI"; 816 } 817} 818 819/// getRegClassFor - Return the register class that should be used for the 820/// specified value type. 821TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const { 822 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 823 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 824 // load / store 4 to 8 consecutive D registers. 825 if (Subtarget->hasNEON()) { 826 if (VT == MVT::v4i64) 827 return ARM::QQPRRegisterClass; 828 else if (VT == MVT::v8i64) 829 return ARM::QQQQPRRegisterClass; 830 } 831 return TargetLowering::getRegClassFor(VT); 832} 833 834// Create a fast isel object. 835FastISel * 836ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 837 return ARM::createFastISel(funcInfo); 838} 839 840/// getFunctionAlignment - Return the Log2 alignment of this function. 841unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const { 842 return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2; 843} 844 845/// getMaximalGlobalOffset - Returns the maximal possible offset which can 846/// be used for loads / stores from the global. 847unsigned ARMTargetLowering::getMaximalGlobalOffset() const { 848 return (Subtarget->isThumb1Only() ? 
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  if (TID.mayLoad())
    return Sched::Latency;

  if (!Itins->isEmpty() && Itins->getStageLatency(TID.getSchedClass()) > 2)
    return Sched::Latency;
  return Sched::RegPressure;
}

unsigned
ARMTargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return RegInfo->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = RegInfo->hasFP(MF) ? 1 : 0;
    return 10 - FP - (Subtarget->isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

//===----------------------------------------------------------------------===//
//                               Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
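  // Under the soft-float ABIs an f64 result comes back as two i32 halves in
  // consecutive core registers (e.g. r0/r1); the needsCustom() path below
  // reassembles them with ARMISD::VMOVDRR, doing so twice for a v2f64.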
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
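  // Note: getMemcpy may expand to inline loads/stores or fall back to a call
  // to memcpy, depending on the copy's size and alignment.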
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
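  // Threading InFlag through each CopyToReg glues the copies to one another
  // (and ultimately to the call node), so the scheduler cannot separate them
  // and the argument registers stay live into the call.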
  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                       Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
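    // Before ARMv5T there is no BLX, so a Thumb caller cannot directly call
    // an ARM callee; the address has to be materialized (here via a
    // PC-relative constant-pool load) and the call made through a register.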
1323 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1324 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1325 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1326 ARMPCLabelIndex, 1327 ARMCP::CPValue, 4); 1328 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1329 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1330 Callee = DAG.getLoad(getPointerTy(), dl, 1331 DAG.getEntryNode(), CPAddr, 1332 MachinePointerInfo::getConstantPool(), 1333 false, false, 0); 1334 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1335 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1336 getPointerTy(), Callee, PICLabel); 1337 } else { 1338 // On ELF targets for PIC code, direct calls should go through the PLT 1339 unsigned OpFlags = 0; 1340 if (Subtarget->isTargetELF() && 1341 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1342 OpFlags = ARMII::MO_PLT; 1343 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1344 } 1345 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1346 isDirect = true; 1347 bool isStub = Subtarget->isTargetDarwin() && 1348 getTargetMachine().getRelocationModel() != Reloc::Static; 1349 isARMFunc = !Subtarget->isThumb() || isStub; 1350 // tBX takes a register source operand. 1351 const char *Sym = S->getSymbol(); 1352 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1353 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1354 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1355 Sym, ARMPCLabelIndex, 4); 1356 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1357 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1358 Callee = DAG.getLoad(getPointerTy(), dl, 1359 DAG.getEntryNode(), CPAddr, 1360 MachinePointerInfo::getConstantPool(), 1361 false, false, 0); 1362 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1363 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1364 getPointerTy(), Callee, PICLabel); 1365 } else { 1366 unsigned OpFlags = 0; 1367 // On ELF targets for PIC code, direct calls should go through the PLT 1368 if (Subtarget->isTargetELF() && 1369 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1370 OpFlags = ARMII::MO_PLT; 1371 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1372 } 1373 } 1374 1375 // FIXME: handle tail calls differently. 1376 unsigned CallOpc; 1377 if (Subtarget->isThumb()) { 1378 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1379 CallOpc = ARMISD::CALL_NOLINK; 1380 else 1381 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1382 } else { 1383 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1384 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1385 : ARMISD::CALL_NOLINK; 1386 } 1387 1388 std::vector<SDValue> Ops; 1389 Ops.push_back(Chain); 1390 Ops.push_back(Callee); 1391 1392 // Add argument registers to the end of the list so that they are known live 1393 // into the call. 1394 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1395 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1396 RegsToPass[i].second.getValueType())); 1397 1398 if (InFlag.getNode()) 1399 Ops.push_back(InFlag); 1400 1401 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 1402 if (isTailCall) 1403 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1404 1405 // Returns a chain and a flag for retval copy to use. 
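// The MVT::Flag (glue) result of the call node keeps the CALLSEQ_END and the
// copies out of the physical return registers pinned directly after the call.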
1406 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1407 InFlag = Chain.getValue(1); 1408 1409 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1410 DAG.getIntPtrConstant(0, true), InFlag); 1411 if (!Ins.empty()) 1412 InFlag = Chain.getValue(1); 1413 1414 // Handle result values, copying them out of physregs into vregs that we 1415 // return. 1416 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1417 dl, DAG, InVals); 1418} 1419 1420 /// MatchingStackOffset - Return true if the given stack call argument is 1421 /// already available at the same (relative) position in the caller's 1422 /// incoming argument stack. 1423 static 1424 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1425 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1426 const ARMInstrInfo *TII) { 1427 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1428 int FI = INT_MAX; 1429 if (Arg.getOpcode() == ISD::CopyFromReg) { 1430 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1431 if (!VR || TargetRegisterInfo::isPhysicalRegister(VR)) 1432 return false; 1433 MachineInstr *Def = MRI->getVRegDef(VR); 1434 if (!Def) 1435 return false; 1436 if (!Flags.isByVal()) { 1437 if (!TII->isLoadFromStackSlot(Def, FI)) 1438 return false; 1439 } else { 1440 return false; 1441 } 1442 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1443 if (Flags.isByVal()) 1444 // ByVal argument is passed in as a pointer but it's now being 1445 // dereferenced. e.g. 1446 // define @foo(%struct.X* %A) { 1447 // tail call @bar(%struct.X* byval %A) 1448 // } 1449 return false; 1450 SDValue Ptr = Ld->getBasePtr(); 1451 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1452 if (!FINode) 1453 return false; 1454 FI = FINode->getIndex(); 1455 } else 1456 return false; 1457 1458 assert(FI != INT_MAX); 1459 if (!MFI->isFixedObjectIndex(FI)) 1460 return false; 1461 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1462} 1463 1464 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 1465 /// for tail call optimization. Targets which want to do tail call 1466 /// optimization should implement this function. 1467 bool 1468 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1469 CallingConv::ID CalleeCC, 1470 bool isVarArg, 1471 bool isCalleeStructRet, 1472 bool isCallerStructRet, 1473 const SmallVectorImpl<ISD::OutputArg> &Outs, 1474 const SmallVectorImpl<SDValue> &OutVals, 1475 const SmallVectorImpl<ISD::InputArg> &Ins, 1476 SelectionDAG& DAG) const { 1477 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1478 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1479 bool CCMatch = CallerCC == CalleeCC; 1480 1481 // Look for obvious safe cases to perform tail call optimization that do not 1482 // require ABI changes. This is what gcc calls sibcall. 1483 1484 // Do not sibcall optimize vararg calls unless the call site passes no 1485 // arguments. 1486 if (isVarArg && !Outs.empty()) 1487 return false; 1488 1489 // Also avoid sibcall optimization if either caller or callee uses struct 1490 // return semantics. 1491 if (isCalleeStructRet || isCallerStructRet) 1492 return false; 1493 1494 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1495 // emitEpilogue is not ready for them. 1496 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1497 // LR.
This means if we need to reload LR, it takes an extra instruction, 1498 // which outweighs the value of the tail call; but here we don't know yet 1499 // whether LR is going to be used. Probably the right approach is to 1500 // generate the tail call here and turn it back into CALL/RET in 1501 // emitEpilogue if LR is used. 1502 if (Subtarget->isThumb1Only()) 1503 return false; 1504 1505 // For the moment, we can only do this to functions defined in this 1506 // compilation, or to indirect calls. A Thumb B to an ARM function, 1507 // or vice versa, is not easily fixed up in the linker unlike BL. 1508 // (We could do this by loading the address of the callee into a register; 1509 // that is an extra instruction over the direct call and burns a register 1510 // as well, so is not likely to be a win.) 1511 1512 // It might be safe to remove this restriction on non-Darwin. 1513 1514 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1515 // but we need to make sure there are enough registers; the only valid 1516 // registers are the 4 used for parameters. We don't currently do this 1517 // case. 1518 if (isa<ExternalSymbolSDNode>(Callee)) 1519 return false; 1520 1521 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1522 const GlobalValue *GV = G->getGlobal(); 1523 if (GV->isDeclaration() || GV->isWeakForLinker()) 1524 return false; 1525 } 1526 1527 // If the calling conventions do not match, then we'd better make sure the 1528 // results are returned in the same way the caller expects. 1529 if (!CCMatch) { 1530 SmallVector<CCValAssign, 16> RVLocs1; 1531 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 1532 RVLocs1, *DAG.getContext()); 1533 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1534 1535 SmallVector<CCValAssign, 16> RVLocs2; 1536 CCState CCInfo2(CallerCC, false, getTargetMachine(), 1537 RVLocs2, *DAG.getContext()); 1538 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1539 1540 if (RVLocs1.size() != RVLocs2.size()) 1541 return false; 1542 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1543 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1544 return false; 1545 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1546 return false; 1547 if (RVLocs1[i].isRegLoc()) { 1548 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1549 return false; 1550 } else { 1551 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1552 return false; 1553 } 1554 } 1555 } 1556 1557 // If the callee takes no arguments then go on to check the results of the 1558 // call. 1559 if (!Outs.empty()) { 1560 // Check if stack adjustment is needed. For now, do not do this if any 1561 // argument is passed on the stack. 1562 SmallVector<CCValAssign, 16> ArgLocs; 1563 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 1564 ArgLocs, *DAG.getContext()); 1565 CCInfo.AnalyzeCallOperands(Outs, 1566 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1567 if (CCInfo.getNextStackOffset()) { 1568 MachineFunction &MF = DAG.getMachineFunction(); 1569 1570 // Check if the arguments are already laid out in the right way, matching 1571 // the caller's fixed stack objects.
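// That is, the sibcall is rejected unless every stack-bound outgoing argument
// would be stored exactly on top of the matching incoming argument slot
// (checked via MatchingStackOffset below).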
1572 MachineFrameInfo *MFI = MF.getFrameInfo(); 1573 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1574 const ARMInstrInfo *TII = 1575 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1576 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1577 i != e; 1578 ++i, ++realArgIdx) { 1579 CCValAssign &VA = ArgLocs[i]; 1580 EVT RegVT = VA.getLocVT(); 1581 SDValue Arg = OutVals[realArgIdx]; 1582 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1583 if (VA.getLocInfo() == CCValAssign::Indirect) 1584 return false; 1585 if (VA.needsCustom()) { 1586 // f64 and vector types are split into multiple registers or 1587 // register/stack-slot combinations. The types will not match 1588 // the registers; give up on memory f64 refs until we figure 1589 // out what to do about this. 1590 if (!VA.isRegLoc()) 1591 return false; 1592 if (!ArgLocs[++i].isRegLoc()) 1593 return false; 1594 if (RegVT == MVT::v2f64) { 1595 if (!ArgLocs[++i].isRegLoc()) 1596 return false; 1597 if (!ArgLocs[++i].isRegLoc()) 1598 return false; 1599 } 1600 } else if (!VA.isRegLoc()) { 1601 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1602 MFI, MRI, TII)) 1603 return false; 1604 } 1605 } 1606 } 1607 } 1608 1609 return true; 1610} 1611 1612SDValue 1613ARMTargetLowering::LowerReturn(SDValue Chain, 1614 CallingConv::ID CallConv, bool isVarArg, 1615 const SmallVectorImpl<ISD::OutputArg> &Outs, 1616 const SmallVectorImpl<SDValue> &OutVals, 1617 DebugLoc dl, SelectionDAG &DAG) const { 1618 1619 // CCValAssign - represent the assignment of the return value to a location. 1620 SmallVector<CCValAssign, 16> RVLocs; 1621 1622 // CCState - Info about the registers and stack slots. 1623 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1624 *DAG.getContext()); 1625 1626 // Analyze outgoing return values. 1627 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1628 isVarArg)); 1629 1630 // If this is the first return lowered for this function, add 1631 // the regs to the liveout set for the function. 1632 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1633 for (unsigned i = 0; i != RVLocs.size(); ++i) 1634 if (RVLocs[i].isRegLoc()) 1635 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1636 } 1637 1638 SDValue Flag; 1639 1640 // Copy the result values into the output registers. 1641 for (unsigned i = 0, realRVLocIdx = 0; 1642 i != RVLocs.size(); 1643 ++i, ++realRVLocIdx) { 1644 CCValAssign &VA = RVLocs[i]; 1645 assert(VA.isRegLoc() && "Can only return in registers!"); 1646 1647 SDValue Arg = OutVals[realRVLocIdx]; 1648 1649 switch (VA.getLocInfo()) { 1650 default: llvm_unreachable("Unknown loc info!"); 1651 case CCValAssign::Full: break; 1652 case CCValAssign::BCvt: 1653 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg); 1654 break; 1655 } 1656 1657 if (VA.needsCustom()) { 1658 if (VA.getLocVT() == MVT::v2f64) { 1659 // Extract the first half and return it in two registers. 
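// A v2f64 return value thus occupies four GPRs: each f64 lane is moved out
// with VMOVRRD, producing two i32 register copies per lane.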
1660 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1661 DAG.getConstant(0, MVT::i32)); 1662 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1663 DAG.getVTList(MVT::i32, MVT::i32), Half); 1664 1665 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1666 Flag = Chain.getValue(1); 1667 VA = RVLocs[++i]; // skip ahead to next loc 1668 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1669 HalfGPRs.getValue(1), Flag); 1670 Flag = Chain.getValue(1); 1671 VA = RVLocs[++i]; // skip ahead to next loc 1672 1673 // Extract the 2nd half and fall through to handle it as an f64 value. 1674 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1675 DAG.getConstant(1, MVT::i32)); 1676 } 1677 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1678 // available. 1679 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1680 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1681 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1682 Flag = Chain.getValue(1); 1683 VA = RVLocs[++i]; // skip ahead to next loc 1684 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1685 Flag); 1686 } else 1687 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1688 1689 // Guarantee that all emitted copies are 1690 // stuck together, avoiding something bad. 1691 Flag = Chain.getValue(1); 1692 } 1693 1694 SDValue result; 1695 if (Flag.getNode()) 1696 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1697 else // Return Void 1698 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1699 1700 return result; 1701} 1702 1703// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1704// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1705// one of the above mentioned nodes. It has to be wrapped because otherwise 1706// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1707// be used to form addressing mode. These wrapped nodes will be selected 1708// into MOVi. 1709static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1710 EVT PtrVT = Op.getValueType(); 1711 // FIXME there is no actual debug info here 1712 DebugLoc dl = Op.getDebugLoc(); 1713 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1714 SDValue Res; 1715 if (CP->isMachineConstantPoolEntry()) 1716 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1717 CP->getAlignment()); 1718 else 1719 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1720 CP->getAlignment()); 1721 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1722} 1723 1724unsigned ARMTargetLowering::getJumpTableEncoding() const { 1725 return MachineJumpTableInfo::EK_Inline; 1726} 1727 1728SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1729 SelectionDAG &DAG) const { 1730 MachineFunction &MF = DAG.getMachineFunction(); 1731 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1732 unsigned ARMPCLabelIndex = 0; 1733 DebugLoc DL = Op.getDebugLoc(); 1734 EVT PtrVT = getPointerTy(); 1735 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1736 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1737 SDValue CPAddr; 1738 if (RelocM == Reloc::Static) { 1739 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1740 } else { 1741 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1742 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1743 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1744 ARMCP::CPBlockAddress, 1745 PCAdj); 1746 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1747 } 1748 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1749 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1750 MachinePointerInfo::getConstantPool(), 1751 false, false, 0); 1752 if (RelocM == Reloc::Static) 1753 return Result; 1754 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1755 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1756} 1757 1758// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1759SDValue 1760ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1761 SelectionDAG &DAG) const { 1762 DebugLoc dl = GA->getDebugLoc(); 1763 EVT PtrVT = getPointerTy(); 1764 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1765 MachineFunction &MF = DAG.getMachineFunction(); 1766 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1767 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1768 ARMConstantPoolValue *CPV = 1769 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1770 ARMCP::CPValue, PCAdj, "tlsgd", true); 1771 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1772 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1773 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1774 MachinePointerInfo::getConstantPool(), 1775 false, false, 0); 1776 SDValue Chain = Argument.getValue(1); 1777 1778 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1779 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1780 1781 // call __tls_get_addr. 1782 ArgListTy Args; 1783 ArgListEntry Entry; 1784 Entry.Node = Argument; 1785 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1786 Args.push_back(Entry); 1787 // FIXME: is there useful debug info available here? 1788 std::pair<SDValue, SDValue> CallResult = 1789 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1790 false, false, false, false, 1791 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1792 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1793 return CallResult.first; 1794} 1795 1796// Lower ISD::GlobalTLSAddress using the "initial exec" or 1797// "local exec" model. 1798SDValue 1799ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1800 SelectionDAG &DAG) const { 1801 const GlobalValue *GV = GA->getGlobal(); 1802 DebugLoc dl = GA->getDebugLoc(); 1803 SDValue Offset; 1804 SDValue Chain = DAG.getEntryNode(); 1805 EVT PtrVT = getPointerTy(); 1806 // Get the Thread Pointer 1807 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1808 1809 if (GV->isDeclaration()) { 1810 MachineFunction &MF = DAG.getMachineFunction(); 1811 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1812 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1813 // Initial exec model. 1814 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 1815 ARMConstantPoolValue *CPV = 1816 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1817 ARMCP::CPValue, PCAdj, "gottpoff", true); 1818 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1819 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1820 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1821 MachinePointerInfo::getConstantPool(), 1822 false, false, 0); 1823 Chain = Offset.getValue(1); 1824 1825 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1826 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1827 1828 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1829 MachinePointerInfo::getConstantPool(), 1830 false, false, 0); 1831 } else { 1832 // local exec model 1833 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff"); 1834 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1835 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1836 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1837 MachinePointerInfo::getConstantPool(), 1838 false, false, 0); 1839 } 1840 1841 // The address of the thread local variable is the add of the thread 1842 // pointer with the offset of the variable. 1843 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1844} 1845 1846SDValue 1847ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1848 // TODO: implement the "local dynamic" model 1849 assert(Subtarget->isTargetELF() && 1850 "TLS not implemented for non-ELF targets"); 1851 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1852 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1853 // otherwise use the "Local Exec" TLS Model 1854 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1855 return LowerToTLSGeneralDynamicModel(GA, DAG); 1856 else 1857 return LowerToTLSExecModels(GA, DAG); 1858} 1859 1860SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1861 SelectionDAG &DAG) const { 1862 EVT PtrVT = getPointerTy(); 1863 DebugLoc dl = Op.getDebugLoc(); 1864 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1865 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1866 if (RelocM == Reloc::PIC_) { 1867 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1868 ARMConstantPoolValue *CPV = 1869 new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT"); 1870 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1871 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1872 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1873 CPAddr, 1874 MachinePointerInfo::getConstantPool(), 1875 false, false, 0); 1876 SDValue Chain = Result.getValue(1); 1877 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1878 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1879 if (!UseGOTOFF) 1880 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1881 MachinePointerInfo::getGOT(), false, false, 0); 1882 return Result; 1883 } else { 1884 // If we have T2 ops, we can materialize the address directly via movt/movw 1885 // pair. This is always cheaper. 
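// e.g. a movw/movt pair carrying :lower16:/:upper16: fixups on the symbol,
// rather than a load from the literal pool.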
1886 if (Subtarget->useMovt()) { 1887 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1888 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 1889 } else { 1890 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1891 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1892 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1893 MachinePointerInfo::getConstantPool(), 1894 false, false, 0); 1895 } 1896 } 1897} 1898 1899SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1900 SelectionDAG &DAG) const { 1901 MachineFunction &MF = DAG.getMachineFunction(); 1902 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1903 unsigned ARMPCLabelIndex = 0; 1904 EVT PtrVT = getPointerTy(); 1905 DebugLoc dl = Op.getDebugLoc(); 1906 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1907 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1908 SDValue CPAddr; 1909 if (RelocM == Reloc::Static) 1910 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1911 else { 1912 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1913 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1914 ARMConstantPoolValue *CPV = 1915 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1916 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1917 } 1918 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1919 1920 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1921 MachinePointerInfo::getConstantPool(), 1922 false, false, 0); 1923 SDValue Chain = Result.getValue(1); 1924 1925 if (RelocM == Reloc::PIC_) { 1926 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1927 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1928 } 1929 1930 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1931 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 1932 false, false, 0); 1933 1934 return Result; 1935} 1936 1937SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 1938 SelectionDAG &DAG) const { 1939 assert(Subtarget->isTargetELF() && 1940 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 1941 MachineFunction &MF = DAG.getMachineFunction(); 1942 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1943 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1944 EVT PtrVT = getPointerTy(); 1945 DebugLoc dl = Op.getDebugLoc(); 1946 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1947 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1948 "_GLOBAL_OFFSET_TABLE_", 1949 ARMPCLabelIndex, PCAdj); 1950 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1951 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1952 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1953 MachinePointerInfo::getConstantPool(), 1954 false, false, 0); 1955 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1956 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1957} 1958 1959SDValue 1960ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 1961 const { 1962 DebugLoc dl = Op.getDebugLoc(); 1963 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 1964 Op.getOperand(0), Op.getOperand(1)); 1965} 1966 1967SDValue 1968ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 1969 DebugLoc dl = Op.getDebugLoc(); 1970 SDValue Val = DAG.getConstant(0, MVT::i32); 1971 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 1972 Op.getOperand(1), Val); 1973} 1974 1975SDValue 1976ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 1977 DebugLoc dl = Op.getDebugLoc(); 1978 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 1979 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 1980} 1981 1982SDValue 1983ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 1984 const ARMSubtarget *Subtarget) const { 1985 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1986 DebugLoc dl = Op.getDebugLoc(); 1987 switch (IntNo) { 1988 default: return SDValue(); // Don't custom lower most intrinsics. 1989 case Intrinsic::arm_thread_pointer: { 1990 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1991 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1992 } 1993 case Intrinsic::eh_sjlj_lsda: { 1994 MachineFunction &MF = DAG.getMachineFunction(); 1995 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1996 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1997 EVT PtrVT = getPointerTy(); 1998 DebugLoc dl = Op.getDebugLoc(); 1999 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2000 SDValue CPAddr; 2001 unsigned PCAdj = (RelocM != Reloc::PIC_) 2002 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2003 ARMConstantPoolValue *CPV = 2004 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2005 ARMCP::CPLSDA, PCAdj); 2006 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2007 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2008 SDValue Result = 2009 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2010 MachinePointerInfo::getConstantPool(), 2011 false, false, 0); 2012 2013 if (RelocM == Reloc::PIC_) { 2014 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2015 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2016 } 2017 return Result; 2018 } 2019 } 2020} 2021 2022static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2023 const ARMSubtarget *Subtarget) { 2024 DebugLoc dl = Op.getDebugLoc(); 2025 SDValue Op5 = Op.getOperand(5); 2026 unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue(); 2027 // Some subtargets which have dmb and dsb instructions can handle barriers 2028 // directly. Some ARMv6 cpus can support them with the help of mcr 2029 // instruction. Thumb1 and pre-v6 ARM mode use a libcall instead and should 2030 // never get here. 
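// With DMB/DSB available the barrier lowers to a bare node; the v6 MCR-based
// fallback below carries an extra zero operand used by the MCR encoding.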
2031 unsigned Opc = isDeviceBarrier ? ARMISD::SYNCBARRIER : ARMISD::MEMBARRIER; 2032 if (Subtarget->hasDataBarrier()) 2033 return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0)); 2034 else { 2035 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb1Only() && 2036 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2037 return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0), 2038 DAG.getConstant(0, MVT::i32)); 2039 } 2040} 2041 2042static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2043 MachineFunction &MF = DAG.getMachineFunction(); 2044 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2045 2046 // vastart just stores the address of the VarArgsFrameIndex slot into the 2047 // memory location argument. 2048 DebugLoc dl = Op.getDebugLoc(); 2049 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2050 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2051 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2052 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2053 MachinePointerInfo(SV), false, false, 0); 2054} 2055 2056SDValue 2057ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2058 SDValue &Root, SelectionDAG &DAG, 2059 DebugLoc dl) const { 2060 MachineFunction &MF = DAG.getMachineFunction(); 2061 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2062 2063 TargetRegisterClass *RC; 2064 if (AFI->isThumb1OnlyFunction()) 2065 RC = ARM::tGPRRegisterClass; 2066 else 2067 RC = ARM::GPRRegisterClass; 2068 2069 // Transform the arguments stored in physical registers into virtual ones. 2070 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2071 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2072 2073 SDValue ArgValue2; 2074 if (NextVA.isMemLoc()) { 2075 MachineFrameInfo *MFI = MF.getFrameInfo(); 2076 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2077 2078 // Create load node to retrieve arguments from the stack. 2079 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2080 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2081 MachinePointerInfo::getFixedStack(FI), 2082 false, false, 0); 2083 } else { 2084 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2085 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2086 } 2087 2088 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2089} 2090 2091SDValue 2092ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2093 CallingConv::ID CallConv, bool isVarArg, 2094 const SmallVectorImpl<ISD::InputArg> 2095 &Ins, 2096 DebugLoc dl, SelectionDAG &DAG, 2097 SmallVectorImpl<SDValue> &InVals) 2098 const { 2099 2100 MachineFunction &MF = DAG.getMachineFunction(); 2101 MachineFrameInfo *MFI = MF.getFrameInfo(); 2102 2103 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2104 2105 // Assign locations to all of the incoming arguments. 2106 SmallVector<CCValAssign, 16> ArgLocs; 2107 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2108 *DAG.getContext()); 2109 CCInfo.AnalyzeFormalArguments(Ins, 2110 CCAssignFnForNode(CallConv, /* Return*/ false, 2111 isVarArg)); 2112 2113 SmallVector<SDValue, 16> ArgValues; 2114 2115 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2116 CCValAssign &VA = ArgLocs[i]; 2117 2118 // Arguments stored in registers. 
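// f64 and v2f64 argument pieces arrive as GPR pairs (possibly spilling into
// stack slots) and are reassembled below via GetF64FormalArgument and, for
// v2f64, a pair of INSERT_VECTOR_ELTs.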
2119 if (VA.isRegLoc()) { 2120 EVT RegVT = VA.getLocVT(); 2121 2122 SDValue ArgValue; 2123 if (VA.needsCustom()) { 2124 // f64 and vector types are split up into multiple registers or 2125 // combinations of registers and stack slots. 2126 if (VA.getLocVT() == MVT::v2f64) { 2127 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2128 Chain, DAG, dl); 2129 VA = ArgLocs[++i]; // skip ahead to next loc 2130 SDValue ArgValue2; 2131 if (VA.isMemLoc()) { 2132 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2133 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2134 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2135 MachinePointerInfo::getFixedStack(FI), 2136 false, false, 0); 2137 } else { 2138 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2139 Chain, DAG, dl); 2140 } 2141 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2142 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2143 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2144 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2145 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2146 } else 2147 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2148 2149 } else { 2150 TargetRegisterClass *RC; 2151 2152 if (RegVT == MVT::f32) 2153 RC = ARM::SPRRegisterClass; 2154 else if (RegVT == MVT::f64) 2155 RC = ARM::DPRRegisterClass; 2156 else if (RegVT == MVT::v2f64) 2157 RC = ARM::QPRRegisterClass; 2158 else if (RegVT == MVT::i32) 2159 RC = (AFI->isThumb1OnlyFunction() ? 2160 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2161 else 2162 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2163 2164 // Transform the arguments in physical registers into virtual ones. 2165 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2166 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2167 } 2168 2169 // If this is an 8 or 16-bit value, it is really passed promoted 2170 // to 32 bits. Insert an assert[sz]ext to capture this, then 2171 // truncate to the right size. 2172 switch (VA.getLocInfo()) { 2173 default: llvm_unreachable("Unknown loc info!"); 2174 case CCValAssign::Full: break; 2175 case CCValAssign::BCvt: 2176 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); 2177 break; 2178 case CCValAssign::SExt: 2179 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2180 DAG.getValueType(VA.getValVT())); 2181 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2182 break; 2183 case CCValAssign::ZExt: 2184 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2185 DAG.getValueType(VA.getValVT())); 2186 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2187 break; 2188 } 2189 2190 InVals.push_back(ArgValue); 2191 2192 } else { // VA.isRegLoc() 2193 2194 // sanity check 2195 assert(VA.isMemLoc()); 2196 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2197 2198 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; 2199 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true); 2200 2201 // Create load nodes to retrieve arguments from the stack. 
2202 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2203 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2204 MachinePointerInfo::getFixedStack(FI), 2205 false, false, 0)); 2206 } 2207 } 2208 2209 // varargs 2210 if (isVarArg) { 2211 static const unsigned GPRArgRegs[] = { 2212 ARM::R0, ARM::R1, ARM::R2, ARM::R3 2213 }; 2214 2215 unsigned NumGPRs = CCInfo.getFirstUnallocated 2216 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2217 2218 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment(); 2219 unsigned VARegSize = (4 - NumGPRs) * 4; 2220 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2221 unsigned ArgOffset = CCInfo.getNextStackOffset(); 2222 if (VARegSaveSize) { 2223 // If this function is vararg, store any remaining integer argument regs 2224 // to their spots on the stack so that they may be loaded by dereferencing 2225 // the result of va_next. 2226 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2227 AFI->setVarArgsFrameIndex( 2228 MFI->CreateFixedObject(VARegSaveSize, 2229 ArgOffset + VARegSaveSize - VARegSize, 2230 false)); 2231 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2232 getPointerTy()); 2233 2234 SmallVector<SDValue, 4> MemOps; 2235 for (; NumGPRs < 4; ++NumGPRs) { 2236 TargetRegisterClass *RC; 2237 if (AFI->isThumb1OnlyFunction()) 2238 RC = ARM::tGPRRegisterClass; 2239 else 2240 RC = ARM::GPRRegisterClass; 2241 2242 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC); 2243 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2244 SDValue Store = 2245 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2246 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2247 false, false, 0); 2248 MemOps.push_back(Store); 2249 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2250 DAG.getConstant(4, getPointerTy())); 2251 } 2252 if (!MemOps.empty()) 2253 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2254 &MemOps[0], MemOps.size()); 2255 } else 2256 // This will point to the next argument passed via stack. 2257 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2258 } 2259 2260 return Chain; 2261} 2262 2263 /// isFloatingPointZero - Return true if this is +0.0. 2264 static bool isFloatingPointZero(SDValue Op) { 2265 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2266 return CFP->getValueAPF().isPosZero(); 2267 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2268 // Maybe this has already been legalized into the constant pool? 2269 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2270 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2271 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2272 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2273 return CFP->getValueAPF().isPosZero(); 2274 } 2275 } 2276 return false; 2277} 2278 2279 /// Returns an appropriate ARM CMP (cmp) and corresponding condition code for 2280 /// the given operands. 2281 SDValue 2282 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2283 SDValue &ARMcc, SelectionDAG &DAG, 2284 DebugLoc dl) const { 2285 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2286 unsigned C = RHSC->getZExtValue(); 2287 if (!isLegalICmpImmediate(C)) { 2288 // Constant does not fit, try adjusting it by one? 2289 switch (CC) { 2290 default: break; 2291 case ISD::SETLT: 2292 case ISD::SETGE: 2293 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2294 CC = (CC == ISD::SETLT) ?
ISD::SETLE : ISD::SETGT; 2295 RHS = DAG.getConstant(C-1, MVT::i32); 2296 } 2297 break; 2298 case ISD::SETULT: 2299 case ISD::SETUGE: 2300 if (C != 0 && isLegalICmpImmediate(C-1)) { 2301 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2302 RHS = DAG.getConstant(C-1, MVT::i32); 2303 } 2304 break; 2305 case ISD::SETLE: 2306 case ISD::SETGT: 2307 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2308 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2309 RHS = DAG.getConstant(C+1, MVT::i32); 2310 } 2311 break; 2312 case ISD::SETULE: 2313 case ISD::SETUGT: 2314 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2315 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 2316 RHS = DAG.getConstant(C+1, MVT::i32); 2317 } 2318 break; 2319 } 2320 } 2321 } 2322 2323 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2324 ARMISD::NodeType CompareType; 2325 switch (CondCode) { 2326 default: 2327 CompareType = ARMISD::CMP; 2328 break; 2329 case ARMCC::EQ: 2330 case ARMCC::NE: 2331 // Uses only Z Flag 2332 CompareType = ARMISD::CMPZ; 2333 break; 2334 } 2335 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2336 return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS); 2337} 2338 2339 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 2340 SDValue 2341 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2342 DebugLoc dl) const { 2343 SDValue Cmp; 2344 if (!isFloatingPointZero(RHS)) 2345 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS); 2346 else 2347 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS); 2348 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp); 2349} 2350 2351 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2352 SDValue Cond = Op.getOperand(0); 2353 SDValue SelectTrue = Op.getOperand(1); 2354 SDValue SelectFalse = Op.getOperand(2); 2355 DebugLoc dl = Op.getDebugLoc(); 2356 2357 // Convert: 2358 // 2359 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2360 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2361 // 2362 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2363 const ConstantSDNode *CMOVTrue = 2364 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2365 const ConstantSDNode *CMOVFalse = 2366 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2367 2368 if (CMOVTrue && CMOVFalse) { 2369 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2370 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2371 2372 SDValue True; 2373 SDValue False; 2374 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2375 True = SelectTrue; 2376 False = SelectFalse; 2377 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2378 True = SelectFalse; 2379 False = SelectTrue; 2380 } 2381 2382 if (True.getNode() && False.getNode()) { 2383 EVT VT = Cond.getValueType(); 2384 SDValue ARMcc = Cond.getOperand(2); 2385 SDValue CCR = Cond.getOperand(3); 2386 SDValue Cmp = Cond.getOperand(4); 2387 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2388 } 2389 } 2390 } 2391 2392 return DAG.getSelectCC(dl, Cond, 2393 DAG.getConstant(0, Cond.getValueType()), 2394 SelectTrue, SelectFalse, ISD::SETNE); 2395} 2396 2397 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2398 EVT VT = Op.getValueType(); 2399 SDValue LHS = Op.getOperand(0); 2400 SDValue RHS = Op.getOperand(1); 2401 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2402 SDValue TrueVal = Op.getOperand(2); 2403 SDValue FalseVal = Op.getOperand(3); 2404 DebugLoc dl = Op.getDebugLoc(); 2405 2406 if
(LHS.getValueType() == MVT::i32) { 2407 SDValue ARMcc; 2408 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2409 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2410 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2411 } 2412 2413 ARMCC::CondCodes CondCode, CondCode2; 2414 FPCCToARMCC(CC, CondCode, CondCode2); 2415 2416 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2417 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2418 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2419 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2420 ARMcc, CCR, Cmp); 2421 if (CondCode2 != ARMCC::AL) { 2422 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2423 // FIXME: Needs another CMP because flag can have but one use. 2424 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2425 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2426 Result, TrueVal, ARMcc2, CCR, Cmp2); 2427 } 2428 return Result; 2429} 2430 2431/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2432/// to morph to an integer compare sequence. 2433static bool canChangeToInt(SDValue Op, bool &SeenZero, 2434 const ARMSubtarget *Subtarget) { 2435 SDNode *N = Op.getNode(); 2436 if (!N->hasOneUse()) 2437 // Otherwise it requires moving the value from fp to integer registers. 2438 return false; 2439 if (!N->getNumValues()) 2440 return false; 2441 EVT VT = Op.getValueType(); 2442 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2443 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2444 // vmrs are very slow, e.g. cortex-a8. 2445 return false; 2446 2447 if (isFloatingPointZero(Op)) { 2448 SeenZero = true; 2449 return true; 2450 } 2451 return ISD::isNormalLoad(N); 2452} 2453 2454static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2455 if (isFloatingPointZero(Op)) 2456 return DAG.getConstant(0, MVT::i32); 2457 2458 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2459 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2460 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2461 Ld->isVolatile(), Ld->isNonTemporal(), 2462 Ld->getAlignment()); 2463 2464 llvm_unreachable("Unknown VFP cmp argument!"); 2465} 2466 2467static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2468 SDValue &RetVal1, SDValue &RetVal2) { 2469 if (isFloatingPointZero(Op)) { 2470 RetVal1 = DAG.getConstant(0, MVT::i32); 2471 RetVal2 = DAG.getConstant(0, MVT::i32); 2472 return; 2473 } 2474 2475 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2476 SDValue Ptr = Ld->getBasePtr(); 2477 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2478 Ld->getChain(), Ptr, 2479 Ld->getPointerInfo(), 2480 Ld->isVolatile(), Ld->isNonTemporal(), 2481 Ld->getAlignment()); 2482 2483 EVT PtrType = Ptr.getValueType(); 2484 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2485 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2486 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2487 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2488 Ld->getChain(), NewPtr, 2489 Ld->getPointerInfo().getWithOffset(4), 2490 Ld->isVolatile(), Ld->isNonTemporal(), 2491 NewAlign); 2492 return; 2493 } 2494 2495 llvm_unreachable("Unknown VFP cmp argument!"); 2496} 2497 2498/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2499/// f32 and even f64 comparisons to integer ones. 
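/// For example, with only equality tested, (brcond (setoeq f32 %a, %b)) can
/// become an integer CMP of the two words reloaded as i32 (or of 0 when an
/// operand is +0.0), avoiding the VFP compare and FMSTAT.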
2500 SDValue 2501 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2502 SDValue Chain = Op.getOperand(0); 2503 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2504 SDValue LHS = Op.getOperand(2); 2505 SDValue RHS = Op.getOperand(3); 2506 SDValue Dest = Op.getOperand(4); 2507 DebugLoc dl = Op.getDebugLoc(); 2508 2509 bool SeenZero = false; 2510 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2511 canChangeToInt(RHS, SeenZero, Subtarget) && 2512 // If one of the operands is zero, it's safe to ignore the NaN case since 2513 // we only care about equality comparisons. 2514 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2515 // If unsafe fp math optimization is enabled and there are no other uses of 2516 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2517 // to an integer comparison. 2518 if (CC == ISD::SETOEQ) 2519 CC = ISD::SETEQ; 2520 else if (CC == ISD::SETUNE) 2521 CC = ISD::SETNE; 2522 2523 SDValue ARMcc; 2524 if (LHS.getValueType() == MVT::f32) { 2525 LHS = bitcastf32Toi32(LHS, DAG); 2526 RHS = bitcastf32Toi32(RHS, DAG); 2527 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2528 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2529 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2530 Chain, Dest, ARMcc, CCR, Cmp); 2531 } 2532 2533 SDValue LHS1, LHS2; 2534 SDValue RHS1, RHS2; 2535 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2536 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2537 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2538 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2539 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2540 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2541 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2542 } 2543 2544 return SDValue(); 2545} 2546 2547 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2548 SDValue Chain = Op.getOperand(0); 2549 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2550 SDValue LHS = Op.getOperand(2); 2551 SDValue RHS = Op.getOperand(3); 2552 SDValue Dest = Op.getOperand(4); 2553 DebugLoc dl = Op.getDebugLoc(); 2554 2555 if (LHS.getValueType() == MVT::i32) { 2556 SDValue ARMcc; 2557 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2558 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2559 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2560 Chain, Dest, ARMcc, CCR, Cmp); 2561 } 2562 2563 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2564 2565 if (UnsafeFPMath && 2566 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2567 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2568 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2569 if (Result.getNode()) 2570 return Result; 2571 } 2572 2573 ARMCC::CondCodes CondCode, CondCode2; 2574 FPCCToARMCC(CC, CondCode, CondCode2); 2575 2576 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2577 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2578 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2579 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); 2580 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2581 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2582 if (CondCode2 != ARMCC::AL) { 2583 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2584 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2585 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2586 } 2587 return Res; 2588} 2589 2590 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2591 SDValue
Chain = Op.getOperand(0); 2592 SDValue Table = Op.getOperand(1); 2593 SDValue Index = Op.getOperand(2); 2594 DebugLoc dl = Op.getDebugLoc(); 2595 2596 EVT PTy = getPointerTy(); 2597 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2598 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2599 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2600 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2601 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2602 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2603 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2604 if (Subtarget->isThumb2()) { 2605 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2606 // which does another jump to the destination. This also makes it easier 2607 // to translate it to TBB / TBH later. 2608 // FIXME: This might not work if the function is extremely large. 2609 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2610 Addr, Op.getOperand(2), JTI, UId); 2611 } 2612 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2613 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2614 MachinePointerInfo::getJumpTable(), 2615 false, false, 0); 2616 Chain = Addr.getValue(1); 2617 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2618 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2619 } else { 2620 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2621 MachinePointerInfo::getJumpTable(), false, false, 0); 2622 Chain = Addr.getValue(1); 2623 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2624 } 2625} 2626 2627static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2628 DebugLoc dl = Op.getDebugLoc(); 2629 unsigned Opc; 2630 2631 switch (Op.getOpcode()) { 2632 default: 2633 assert(0 && "Invalid opcode!"); 2634 case ISD::FP_TO_SINT: 2635 Opc = ARMISD::FTOSI; 2636 break; 2637 case ISD::FP_TO_UINT: 2638 Opc = ARMISD::FTOUI; 2639 break; 2640 } 2641 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2642 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); 2643} 2644 2645static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2646 EVT VT = Op.getValueType(); 2647 DebugLoc dl = Op.getDebugLoc(); 2648 unsigned Opc; 2649 2650 switch (Op.getOpcode()) { 2651 default: 2652 assert(0 && "Invalid opcode!"); 2653 case ISD::SINT_TO_FP: 2654 Opc = ARMISD::SITOF; 2655 break; 2656 case ISD::UINT_TO_FP: 2657 Opc = ARMISD::UITOF; 2658 break; 2659 } 2660 2661 Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0)); 2662 return DAG.getNode(Opc, dl, VT, Op); 2663} 2664 2665SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2666 // Implement fcopysign with a fabs and a conditional fneg. 
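// i.e. copysign(x, y): t = fabs(x); if (y < +0.0) t = -t. The VFP compare of
// y against +0.0 sets CPSR, and CNEG performs the conditional negation on LT.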
2667 SDValue Tmp0 = Op.getOperand(0); 2668 SDValue Tmp1 = Op.getOperand(1); 2669 DebugLoc dl = Op.getDebugLoc(); 2670 EVT VT = Op.getValueType(); 2671 EVT SrcVT = Tmp1.getValueType(); 2672 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0); 2673 SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32); 2674 SDValue FP0 = DAG.getConstantFP(0.0, SrcVT); 2675 SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl); 2676 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2677 return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp); 2678} 2679 2680SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2681 MachineFunction &MF = DAG.getMachineFunction(); 2682 MachineFrameInfo *MFI = MF.getFrameInfo(); 2683 MFI->setReturnAddressIsTaken(true); 2684 2685 EVT VT = Op.getValueType(); 2686 DebugLoc dl = Op.getDebugLoc(); 2687 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2688 if (Depth) { 2689 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2690 SDValue Offset = DAG.getConstant(4, MVT::i32); 2691 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2692 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2693 MachinePointerInfo(), false, false, 0); 2694 } 2695 2696 // Return LR, which contains the return address. Mark it an implicit live-in. 2697 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 2698 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 2699} 2700 2701SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 2702 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2703 MFI->setFrameAddressIsTaken(true); 2704 2705 EVT VT = Op.getValueType(); 2706 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 2707 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2708 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 2709 ? ARM::R7 : ARM::R11; 2710 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2711 while (Depth--) 2712 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2713 MachinePointerInfo(), 2714 false, false, 0); 2715 return FrameAddr; 2716} 2717 2718/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to 2719/// expand a bit convert where either the source or destination type is i64 to 2720/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 2721/// operand type is illegal (e.g., v2f32 for a target that doesn't support 2722/// vectors), since the legalizer won't know what to do with that. 2723static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) { 2724 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2725 DebugLoc dl = N->getDebugLoc(); 2726 SDValue Op = N->getOperand(0); 2727 2728 // This function is only supposed to be called for i64 types, either as the 2729 // source or destination of the bit convert. 2730 EVT SrcVT = Op.getValueType(); 2731 EVT DstVT = N->getValueType(0); 2732 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 2733 "ExpandBIT_CONVERT called for non-i64 type"); 2734 2735 // Turn i64->f64 into VMOVDRR. 
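// The i64 is split into its low and high i32 halves, which VMOVDRR then
// moves into a single f64 (D) register.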
2736 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 2737 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2738 DAG.getConstant(0, MVT::i32)); 2739 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 2740 DAG.getConstant(1, MVT::i32)); 2741 return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT, 2742 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 2743 } 2744 2745 // Turn f64->i64 into VMOVRRD. 2746 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 2747 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 2748 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 2749 // Merge the pieces into a single i64 value. 2750 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 2751 } 2752 2753 return SDValue(); 2754} 2755 2756 /// getZeroVector - Returns a vector of specified type with all zero elements. 2757 /// Zero vectors are used to represent vector negation and in those cases 2758 /// will be implemented with the NEON VNEG instruction. However, VNEG does 2759 /// not support i64 elements, so sometimes the zero vectors will need to be 2760 /// explicitly constructed. Regardless, use a canonical VMOV to create the 2761 /// zero vector. 2762 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 2763 assert(VT.isVector() && "Expected a vector type"); 2764 // The canonical modified immediate encoding of a zero vector is....0! 2765 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 2766 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 2767 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 2768 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 2769} 2770 2771 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two 2772 /// i32 values and takes a 2 x i32 value to shift plus a shift amount. 2773 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 2774 SelectionDAG &DAG) const { 2775 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2776 EVT VT = Op.getValueType(); 2777 unsigned VTBits = VT.getSizeInBits(); 2778 DebugLoc dl = Op.getDebugLoc(); 2779 SDValue ShOpLo = Op.getOperand(0); 2780 SDValue ShOpHi = Op.getOperand(1); 2781 SDValue ShAmt = Op.getOperand(2); 2782 SDValue ARMcc; 2783 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 2784 2785 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 2786 2787 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2788 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2789 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 2790 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2791 DAG.getConstant(VTBits, MVT::i32)); 2792 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 2793 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2794 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 2795 2796 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2797 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2798 ARMcc, DAG, dl); 2799 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 2800 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 2801 CCR, Cmp); 2802 2803 SDValue Ops[2] = { Lo, Hi }; 2804 return DAG.getMergeValues(Ops, 2, dl); 2805} 2806 2807 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 2808 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
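/// The high result selects between (Hi << Amt) | (Lo >> (width - Amt)) and
/// Lo << (Amt - width), depending on whether Amt reaches the word width; the
/// low result is simply Lo << Amt.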
2809 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 2810 SelectionDAG &DAG) const { 2811 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 2812 EVT VT = Op.getValueType(); 2813 unsigned VTBits = VT.getSizeInBits(); 2814 DebugLoc dl = Op.getDebugLoc(); 2815 SDValue ShOpLo = Op.getOperand(0); 2816 SDValue ShOpHi = Op.getOperand(1); 2817 SDValue ShAmt = Op.getOperand(2); 2818 SDValue ARMcc; 2819 2820 assert(Op.getOpcode() == ISD::SHL_PARTS); 2821 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 2822 DAG.getConstant(VTBits, MVT::i32), ShAmt); 2823 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 2824 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 2825 DAG.getConstant(VTBits, MVT::i32)); 2826 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 2827 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 2828 2829 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2830 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2831 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 2832 ARMcc, DAG, dl); 2833 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 2834 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 2835 CCR, Cmp); 2836 2837 SDValue Ops[2] = { Lo, Hi }; 2838 return DAG.getMergeValues(Ops, 2, dl); 2839} 2840 2841SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 2842 SelectionDAG &DAG) const { 2843 // The rounding mode is in bits 23:22 of the FPSCR. 2844 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 2845 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3, 2846 // so that the shift and the AND get folded into a bitfield extract. 2847 DebugLoc dl = Op.getDebugLoc(); 2848 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 2849 DAG.getConstant(Intrinsic::arm_get_fpscr, 2850 MVT::i32)); 2851 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 2852 DAG.getConstant(1U << 22, MVT::i32)); 2853 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 2854 DAG.getConstant(22, MVT::i32)); 2855 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 2856 DAG.getConstant(3, MVT::i32)); 2857} 2858 2859static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 2860 const ARMSubtarget *ST) { 2861 EVT VT = N->getValueType(0); 2862 DebugLoc dl = N->getDebugLoc(); 2863 2864 if (!ST->hasV6T2Ops()) 2865 return SDValue(); 2866 2867 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 2868 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 2869} 2870 2871static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 2872 const ARMSubtarget *ST) { 2873 EVT VT = N->getValueType(0); 2874 DebugLoc dl = N->getDebugLoc(); 2875 2876 // Lower vector shifts on NEON to use VSHL. 2877 if (VT.isVector()) { 2878 assert(ST->hasNEON() && "unexpected vector shift"); 2879 2880 // Left shifts translate directly to the vshiftu intrinsic. 2881 if (N->getOpcode() == ISD::SHL) 2882 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 2883 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 2884 N->getOperand(0), N->getOperand(1)); 2885 2886 assert((N->getOpcode() == ISD::SRA || 2887 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 2888 2889 // NEON uses the same intrinsics for both left and right shifts. For 2890 // right shifts, the shift amounts are negative, so negate the vector of 2891 // shift amounts.
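// For example (a sketch, not exercised here): an SRL of a v4i32 by <1, 2, 3, 4> becomes a vshiftu by <-1, -2, -3, -4>, computed as (sub zero-vector, shift-amounts) below.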
2892 EVT ShiftVT = N->getOperand(1).getValueType(); 2893 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 2894 getZeroVector(ShiftVT, DAG, dl), 2895 N->getOperand(1)); 2896 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 2897 Intrinsic::arm_neon_vshifts : 2898 Intrinsic::arm_neon_vshiftu); 2899 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 2900 DAG.getConstant(vshiftInt, MVT::i32), 2901 N->getOperand(0), NegatedCount); 2902 } 2903 2904 // We can get here for a node like i32 = ISD::SHL i32, i64 2905 if (VT != MVT::i64) 2906 return SDValue(); 2907 2908 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 2909 "Unknown shift to lower!"); 2910 2911 // We only lower SRA, SRL of 1 here; all others use generic lowering. 2912 if (!isa<ConstantSDNode>(N->getOperand(1)) || 2913 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 2914 return SDValue(); 2915 2916 // If we are in Thumb1 mode, we don't have RRX. 2917 if (ST->isThumb1Only()) return SDValue(); 2918 2919 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 2920 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 2921 DAG.getConstant(0, MVT::i32)); 2922 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 2923 DAG.getConstant(1, MVT::i32)); 2924 2925 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 2926 // captures the result into a carry flag. 2927 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 2928 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1); 2929 2930 // The low part is an ARMISD::RRX operand, which shifts the carry in. 2931 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 2932 2933 // Merge the pieces into a single i64 value. 2934 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 2935} 2936 2937static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 2938 SDValue TmpOp0, TmpOp1; 2939 bool Invert = false; 2940 bool Swap = false; 2941 unsigned Opc = 0; 2942 2943 SDValue Op0 = Op.getOperand(0); 2944 SDValue Op1 = Op.getOperand(1); 2945 SDValue CC = Op.getOperand(2); 2946 EVT VT = Op.getValueType(); 2947 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 2948 DebugLoc dl = Op.getDebugLoc(); 2949 2950 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 2951 switch (SetCCOpcode) { 2952 default: llvm_unreachable("Illegal FP comparison"); break; 2953 case ISD::SETUNE: 2954 case ISD::SETNE: Invert = true; // Fallthrough 2955 case ISD::SETOEQ: 2956 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 2957 case ISD::SETOLT: 2958 case ISD::SETLT: Swap = true; // Fallthrough 2959 case ISD::SETOGT: 2960 case ISD::SETGT: Opc = ARMISD::VCGT; break; 2961 case ISD::SETOLE: 2962 case ISD::SETLE: Swap = true; // Fallthrough 2963 case ISD::SETOGE: 2964 case ISD::SETGE: Opc = ARMISD::VCGE; break; 2965 case ISD::SETUGE: Swap = true; // Fallthrough 2966 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 2967 case ISD::SETUGT: Swap = true; // Fallthrough 2968 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 2969 case ISD::SETUEQ: Invert = true; // Fallthrough 2970 case ISD::SETONE: 2971 // Expand this to (OLT | OGT).
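// (A sketch of the expansion: one(x, y) == VCGT(y, x) | VCGT(x, y), i.e., OLT | OGT; the SETUEQ case above reuses this and inverts the result, since ueq == !one.)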
2972 TmpOp0 = Op0; 2973 TmpOp1 = Op1; 2974 Opc = ISD::OR; 2975 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 2976 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 2977 break; 2978 case ISD::SETUO: Invert = true; // Fallthrough 2979 case ISD::SETO: 2980 // Expand this to (OLT | OGE). 2981 TmpOp0 = Op0; 2982 TmpOp1 = Op1; 2983 Opc = ISD::OR; 2984 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 2985 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 2986 break; 2987 } 2988 } else { 2989 // Integer comparisons. 2990 switch (SetCCOpcode) { 2991 default: llvm_unreachable("Illegal integer comparison"); break; 2992 case ISD::SETNE: Invert = true; // Fallthrough 2993 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 2994 case ISD::SETLT: Swap = true; // Fallthrough 2995 case ISD::SETGT: Opc = ARMISD::VCGT; break; 2996 case ISD::SETLE: Swap = true; // Fallthrough 2997 case ISD::SETGE: Opc = ARMISD::VCGE; break; 2998 case ISD::SETULT: Swap = true; // Fallthrough 2999 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3000 case ISD::SETULE: Swap = true; // Fallthrough 3001 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3002 } 3003 3004 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3005 if (Opc == ARMISD::VCEQ) { 3006 3007 SDValue AndOp; 3008 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3009 AndOp = Op0; 3010 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3011 AndOp = Op1; 3012 3013 // Ignore bitconvert. 3014 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT) 3015 AndOp = AndOp.getOperand(0); 3016 3017 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3018 Opc = ARMISD::VTST; 3019 Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0)); 3020 Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1)); 3021 Invert = !Invert; 3022 } 3023 } 3024 } 3025 3026 if (Swap) 3027 std::swap(Op0, Op1); 3028 3029 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3030 3031 if (Invert) 3032 Result = DAG.getNOT(dl, Result, VT); 3033 3034 return Result; 3035} 3036 3037/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3038/// valid vector constant for a NEON instruction with a "modified immediate" 3039/// operand (e.g., VMOV). If so, return the encoded value. 3040static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3041 unsigned SplatBitSize, SelectionDAG &DAG, 3042 EVT &VT, bool is128Bits, bool isVMOV) { 3043 unsigned OpCmode, Imm; 3044 3045 // SplatBitSize is set to the smallest size that splats the vector, so a 3046 // zero vector will always have SplatBitSize == 8. However, NEON modified 3047 // immediate instructions other than VMOV do not support the 8-bit encoding 3048 // of a zero vector, and the default encoding of zero is supposed to be the 3049 // 32-bit version. 3050 if (SplatBits == 0) 3051 SplatBitSize = 32; 3052 3053 switch (SplatBitSize) { 3054 case 8: 3055 if (!isVMOV) 3056 return SDValue(); 3057 // Any 1-byte value is OK. Op=0, Cmode=1110. 3058 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3059 OpCmode = 0xe; 3060 Imm = SplatBits; 3061 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3062 break; 3063 3064 case 16: 3065 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3066 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3067 if ((SplatBits & ~0xff) == 0) { 3068 // Value = 0x00nn: Op=x, Cmode=100x. 3069 OpCmode = 0x8; 3070 Imm = SplatBits; 3071 break; 3072 } 3073 if ((SplatBits & ~0xff00) == 0) { 3074 // Value = 0xnn00: Op=x, Cmode=101x.
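// (For example, under this encoding a v4i16 splat of 0x2a00 would be emitted with Imm = 0x2a and Cmode = 101x; a sketch of the scheme, not a separate case in the code.)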
3075 OpCmode = 0xa; 3076 Imm = SplatBits >> 8; 3077 break; 3078 } 3079 return SDValue(); 3080 3081 case 32: 3082 // NEON's 32-bit VMOV supports splat values where: 3083 // * only one byte is nonzero, or 3084 // * the least significant byte is 0xff and the second byte is nonzero, or 3085 // * the least significant 2 bytes are 0xff and the third is nonzero. 3086 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3087 if ((SplatBits & ~0xff) == 0) { 3088 // Value = 0x000000nn: Op=x, Cmode=000x. 3089 OpCmode = 0; 3090 Imm = SplatBits; 3091 break; 3092 } 3093 if ((SplatBits & ~0xff00) == 0) { 3094 // Value = 0x0000nn00: Op=x, Cmode=001x. 3095 OpCmode = 0x2; 3096 Imm = SplatBits >> 8; 3097 break; 3098 } 3099 if ((SplatBits & ~0xff0000) == 0) { 3100 // Value = 0x00nn0000: Op=x, Cmode=010x. 3101 OpCmode = 0x4; 3102 Imm = SplatBits >> 16; 3103 break; 3104 } 3105 if ((SplatBits & ~0xff000000) == 0) { 3106 // Value = 0xnn000000: Op=x, Cmode=011x. 3107 OpCmode = 0x6; 3108 Imm = SplatBits >> 24; 3109 break; 3110 } 3111 3112 if ((SplatBits & ~0xffff) == 0 && 3113 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3114 // Value = 0x0000nnff: Op=x, Cmode=1100. 3115 OpCmode = 0xc; 3116 Imm = SplatBits >> 8; 3117 SplatBits |= 0xff; 3118 break; 3119 } 3120 3121 if ((SplatBits & ~0xffffff) == 0 && 3122 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3123 // Value = 0x00nnffff: Op=x, Cmode=1101. 3124 OpCmode = 0xd; 3125 Imm = SplatBits >> 16; 3126 SplatBits |= 0xffff; 3127 break; 3128 } 3129 3130 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3131 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3132 // VMOV.I32. A (very) minor optimization would be to replicate the value 3133 // and fall through here to test for a valid 64-bit splat. But, then the 3134 // caller would also need to check and handle the change in size. 3135 return SDValue(); 3136 3137 case 64: { 3138 if (!isVMOV) 3139 return SDValue(); 3140 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3141 uint64_t BitMask = 0xff; 3142 uint64_t Val = 0; 3143 unsigned ImmMask = 1; 3144 Imm = 0; 3145 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3146 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3147 Val |= BitMask; 3148 Imm |= ImmMask; 3149 } else if ((SplatBits & BitMask) != 0) { 3150 return SDValue(); 3151 } 3152 BitMask <<= 8; 3153 ImmMask <<= 1; 3154 } 3155 // Op=1, Cmode=1110. 3156 OpCmode = 0x1e; 3157 SplatBits = Val; 3158 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3159 break; 3160 } 3161 3162 default: 3163 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3164 return SDValue(); 3165 } 3166 3167 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3168 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3169} 3170 3171static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3172 bool &ReverseVEXT, unsigned &Imm) { 3173 unsigned NumElts = VT.getVectorNumElements(); 3174 ReverseVEXT = false; 3175 3176 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3177 if (M[0] < 0) 3178 return false; 3179 3180 Imm = M[0]; 3181 3182 // If this is a VEXT shuffle, the immediate value is the index of the first 3183 // element. The other shuffle indices must be the successive elements after 3184 // the first one. 3185 unsigned ExpectedElt = Imm; 3186 for (unsigned i = 1; i < NumElts; ++i) { 3187 // Increment the expected index. If it wraps around, it may still be 3188 // a VEXT but the source vectors must be swapped. 
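// (For example, with NumElts == 4: mask <2, 3, 4, 5> is a VEXT of (V1, V2) with Imm = 2, while mask <6, 7, 0, 1> wraps past 2*NumElts, so it is a VEXT of the swapped sources with Imm adjusted below to 6 - 4 == 2; a sketch, not separate cases in the code.)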
3189 ExpectedElt += 1; 3190 if (ExpectedElt == NumElts * 2) { 3191 ExpectedElt = 0; 3192 ReverseVEXT = true; 3193 } 3194 3195 if (M[i] < 0) continue; // ignore UNDEF indices 3196 if (ExpectedElt != static_cast<unsigned>(M[i])) 3197 return false; 3198 } 3199 3200 // Adjust the index value if the source operands will be swapped. 3201 if (ReverseVEXT) 3202 Imm -= NumElts; 3203 3204 return true; 3205} 3206 3207/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3208/// instruction with the specified blocksize. (The order of the elements 3209/// within each block of the vector is reversed.) 3210static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3211 unsigned BlockSize) { 3212 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3213 "Only possible block sizes for VREV are: 16, 32, 64"); 3214 3215 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3216 if (EltSz == 64) 3217 return false; 3218 3219 unsigned NumElts = VT.getVectorNumElements(); 3220 unsigned BlockElts = M[0] + 1; 3221 // If the first shuffle index is UNDEF, be optimistic. 3222 if (M[0] < 0) 3223 BlockElts = BlockSize / EltSz; 3224 3225 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3226 return false; 3227 3228 for (unsigned i = 0; i < NumElts; ++i) { 3229 if (M[i] < 0) continue; // ignore UNDEF indices 3230 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3231 return false; 3232 } 3233 3234 return true; 3235} 3236 3237static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3238 unsigned &WhichResult) { 3239 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3240 if (EltSz == 64) 3241 return false; 3242 3243 unsigned NumElts = VT.getVectorNumElements(); 3244 WhichResult = (M[0] == 0 ? 0 : 1); 3245 for (unsigned i = 0; i < NumElts; i += 2) { 3246 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3247 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3248 return false; 3249 } 3250 return true; 3251} 3252 3253/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3254/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3255/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3256static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3257 unsigned &WhichResult) { 3258 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3259 if (EltSz == 64) 3260 return false; 3261 3262 unsigned NumElts = VT.getVectorNumElements(); 3263 WhichResult = (M[0] == 0 ? 0 : 1); 3264 for (unsigned i = 0; i < NumElts; i += 2) { 3265 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3266 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3267 return false; 3268 } 3269 return true; 3270} 3271 3272static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3273 unsigned &WhichResult) { 3274 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3275 if (EltSz == 64) 3276 return false; 3277 3278 unsigned NumElts = VT.getVectorNumElements(); 3279 WhichResult = (M[0] == 0 ? 0 : 1); 3280 for (unsigned i = 0; i != NumElts; ++i) { 3281 if (M[i] < 0) continue; // ignore UNDEF indices 3282 if ((unsigned) M[i] != 2 * i + WhichResult) 3283 return false; 3284 } 3285 3286 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3287 if (VT.is64BitVector() && EltSz == 32) 3288 return false; 3289 3290 return true; 3291} 3292 3293/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3294/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 
3295 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. 3296 static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3297 unsigned &WhichResult) { 3298 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3299 if (EltSz == 64) 3300 return false; 3301 3302 unsigned Half = VT.getVectorNumElements() / 2; 3303 WhichResult = (M[0] == 0 ? 0 : 1); 3304 for (unsigned j = 0; j != 2; ++j) { 3305 unsigned Idx = WhichResult; 3306 for (unsigned i = 0; i != Half; ++i) { 3307 int MIdx = M[i + j * Half]; 3308 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3309 return false; 3310 Idx += 2; 3311 } 3312 } 3313 3314 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3315 if (VT.is64BitVector() && EltSz == 32) 3316 return false; 3317 3318 return true; 3319} 3320 3321static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3322 unsigned &WhichResult) { 3323 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3324 if (EltSz == 64) 3325 return false; 3326 3327 unsigned NumElts = VT.getVectorNumElements(); 3328 WhichResult = (M[0] == 0 ? 0 : 1); 3329 unsigned Idx = WhichResult * NumElts / 2; 3330 for (unsigned i = 0; i != NumElts; i += 2) { 3331 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3332 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3333 return false; 3334 Idx += 1; 3335 } 3336 3337 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3338 if (VT.is64BitVector() && EltSz == 32) 3339 return false; 3340 3341 return true; 3342} 3343 3344/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3345/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3346/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3347static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3348 unsigned &WhichResult) { 3349 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3350 if (EltSz == 64) 3351 return false; 3352 3353 unsigned NumElts = VT.getVectorNumElements(); 3354 WhichResult = (M[0] == 0 ? 0 : 1); 3355 unsigned Idx = WhichResult * NumElts / 2; 3356 for (unsigned i = 0; i != NumElts; i += 2) { 3357 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3358 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3359 return false; 3360 Idx += 1; 3361 } 3362 3363 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3364 if (VT.is64BitVector() && EltSz == 32) 3365 return false; 3366 3367 return true; 3368} 3369 3370// If N is an integer constant that can be moved into a register in one 3371// instruction, return an SDValue of such a constant (will become a MOV 3372// instruction). Otherwise return null. 3373static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3374 const ARMSubtarget *ST, DebugLoc dl) { 3375 uint64_t Val; 3376 if (!isa<ConstantSDNode>(N)) 3377 return SDValue(); 3378 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3379 3380 if (ST->isThumb1Only()) { 3381 if (Val <= 255 || (~Val & 0xffffffffULL) <= 255) // MOV or MVN of the low 32 bits 3382 return DAG.getConstant(Val, MVT::i32); 3383 } else { 3384 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3385 return DAG.getConstant(Val, MVT::i32); 3386 } 3387 return SDValue(); 3388} 3389 3390// If this is a case we can't handle, return null and let the default 3391// expansion code take care of it.
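// For example (a sketch): a v4i32 splat of 0x00ff0000 is matched below as an immediate VMOV, while a splat of 0xffffff00, which is not itself a valid modified immediate, is built as a VMVN of its complement 0x000000ff.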
3392 static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3393 const ARMSubtarget *ST) { 3394 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3395 DebugLoc dl = Op.getDebugLoc(); 3396 EVT VT = Op.getValueType(); 3397 3398 APInt SplatBits, SplatUndef; 3399 unsigned SplatBitSize; 3400 bool HasAnyUndefs; 3401 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3402 if (SplatBitSize <= 64) { 3403 // Check if an immediate VMOV works. 3404 EVT VmovVT; 3405 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3406 SplatUndef.getZExtValue(), SplatBitSize, 3407 DAG, VmovVT, VT.is128BitVector(), true); 3408 if (Val.getNode()) { 3409 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3410 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3411 } 3412 3413 // Try an immediate VMVN. 3414 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3415 ((1LL << SplatBitSize) - 1)); 3416 Val = isNEONModifiedImm(NegatedImm, 3417 SplatUndef.getZExtValue(), SplatBitSize, 3418 DAG, VmovVT, VT.is128BitVector(), false); 3419 if (Val.getNode()) { 3420 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3421 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); 3422 } 3423 } 3424 } 3425 3426 // Scan through the operands to see if only one value is used. 3427 unsigned NumElts = VT.getVectorNumElements(); 3428 bool isOnlyLowElement = true; 3429 bool usesOnlyOneValue = true; 3430 bool isConstant = true; 3431 SDValue Value; 3432 for (unsigned i = 0; i < NumElts; ++i) { 3433 SDValue V = Op.getOperand(i); 3434 if (V.getOpcode() == ISD::UNDEF) 3435 continue; 3436 if (i > 0) 3437 isOnlyLowElement = false; 3438 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3439 isConstant = false; 3440 3441 if (!Value.getNode()) 3442 Value = V; 3443 else if (V != Value) 3444 usesOnlyOneValue = false; 3445 } 3446 3447 if (!Value.getNode()) 3448 return DAG.getUNDEF(VT); 3449 3450 if (isOnlyLowElement) 3451 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3452 3453 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3454 3455 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3456 // i32 and try again. 3457 if (usesOnlyOneValue && EltSize <= 32) { 3458 if (!isConstant) 3459 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3460 if (VT.getVectorElementType().isFloatingPoint()) { 3461 SmallVector<SDValue, 8> Ops; 3462 for (unsigned i = 0; i < NumElts; ++i) 3463 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, 3464 Op.getOperand(i))); 3465 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3466 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3467 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3468 if (Val.getNode()) 3469 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3470 } 3471 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3472 if (Val.getNode()) 3473 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3474 } 3475 3476 // If all elements are constants and the case above didn't get hit, fall back 3477 // to the default expansion, which will generate a load from the constant 3478 // pool. 3479 if (isConstant) 3480 return SDValue(); 3481 3482 // Vectors with 32- or 64-bit elements can be built by directly assigning 3483 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3484 // will be legalized. 3485 if (EltSize >= 32) { 3486 // Do the expansion with floating-point types, since that is what the VFP 3487 // registers are defined to use, and since i64 is not legal.
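// (E.g., a v2i64 build_vector is emitted as an ARMISD::BUILD_VECTOR of two f64 operands, each bitcast from i64, and the result is bitcast back to v2i64; a sketch of the expansion below, which needs only D-register assignments.)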
3488 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3489 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3490 SmallVector<SDValue, 8> Ops; 3491 for (unsigned i = 0; i < NumElts; ++i) 3492 Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i))); 3493 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3494 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3495 } 3496 3497 return SDValue(); 3498} 3499 3500/// isShuffleMaskLegal - Targets can use this to indicate that they only 3501/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 3502/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 3503/// are assumed to be legal. 3504bool 3505ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 3506 EVT VT) const { 3507 if (VT.getVectorNumElements() == 4 && 3508 (VT.is128BitVector() || VT.is64BitVector())) { 3509 unsigned PFIndexes[4]; 3510 for (unsigned i = 0; i != 4; ++i) { 3511 if (M[i] < 0) 3512 PFIndexes[i] = 8; 3513 else 3514 PFIndexes[i] = M[i]; 3515 } 3516 3517 // Compute the index in the perfect shuffle table. 3518 unsigned PFTableIndex = 3519 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3520 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3521 unsigned Cost = (PFEntry >> 30); 3522 3523 if (Cost <= 4) 3524 return true; 3525 } 3526 3527 bool ReverseVEXT; 3528 unsigned Imm, WhichResult; 3529 3530 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3531 return (EltSize >= 32 || 3532 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 3533 isVREVMask(M, VT, 64) || 3534 isVREVMask(M, VT, 32) || 3535 isVREVMask(M, VT, 16) || 3536 isVEXTMask(M, VT, ReverseVEXT, Imm) || 3537 isVTRNMask(M, VT, WhichResult) || 3538 isVUZPMask(M, VT, WhichResult) || 3539 isVZIPMask(M, VT, WhichResult) || 3540 isVTRN_v_undef_Mask(M, VT, WhichResult) || 3541 isVUZP_v_undef_Mask(M, VT, WhichResult) || 3542 isVZIP_v_undef_Mask(M, VT, WhichResult)); 3543} 3544 3545/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3546/// the specified operations to build the shuffle. 
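/// Each 32-bit table entry is packed as (a layout sketch, matching the decoding below and the cost check in isShuffleMaskLegal): bits [31:30] = cost, [29:26] = opcode, [25:13] = LHS entry, [12:0] = RHS entry, where the two 13-bit sub-entries index recursively into the same table.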
3547 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3548 SDValue RHS, SelectionDAG &DAG, 3549 DebugLoc dl) { 3550 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3551 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3552 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3553 3554 enum { 3555 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3556 OP_VREV, 3557 OP_VDUP0, 3558 OP_VDUP1, 3559 OP_VDUP2, 3560 OP_VDUP3, 3561 OP_VEXT1, 3562 OP_VEXT2, 3563 OP_VEXT3, 3564 OP_VUZPL, // VUZP, left result 3565 OP_VUZPR, // VUZP, right result 3566 OP_VZIPL, // VZIP, left result 3567 OP_VZIPR, // VZIP, right result 3568 OP_VTRNL, // VTRN, left result 3569 OP_VTRNR // VTRN, right result 3570 }; 3571 3572 if (OpNum == OP_COPY) { 3573 if (LHSID == (1*9+2)*9+3) return LHS; 3574 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3575 return RHS; 3576 } 3577 3578 SDValue OpLHS, OpRHS; 3579 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3580 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3581 EVT VT = OpLHS.getValueType(); 3582 3583 switch (OpNum) { 3584 default: llvm_unreachable("Unknown shuffle opcode!"); 3585 case OP_VREV: 3586 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 3587 case OP_VDUP0: 3588 case OP_VDUP1: 3589 case OP_VDUP2: 3590 case OP_VDUP3: 3591 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 3592 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 3593 case OP_VEXT1: 3594 case OP_VEXT2: 3595 case OP_VEXT3: 3596 return DAG.getNode(ARMISD::VEXT, dl, VT, 3597 OpLHS, OpRHS, 3598 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 3599 case OP_VUZPL: 3600 case OP_VUZPR: 3601 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3602 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 3603 case OP_VZIPL: 3604 case OP_VZIPR: 3605 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3606 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 3607 case OP_VTRNL: 3608 case OP_VTRNR: 3609 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3610 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 3611 } 3612} 3613 3614static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 3615 SDValue V1 = Op.getOperand(0); 3616 SDValue V2 = Op.getOperand(1); 3617 DebugLoc dl = Op.getDebugLoc(); 3618 EVT VT = Op.getValueType(); 3619 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 3620 SmallVector<int, 8> ShuffleMask; 3621 3622 // Convert shuffles that are directly supported on NEON to target-specific 3623 // DAG nodes, instead of keeping them as shuffles and matching them again 3624 // during code selection. This is more efficient and avoids the possibility 3625 // of inconsistencies between legalization and selection. 3626 // FIXME: floating-point vectors should be canonicalized to integer vectors 3627 // of the same size so that they get CSEd properly. 3628 SVN->getMask(ShuffleMask); 3629 3630 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3631 if (EltSize <= 32) { 3632 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 3633 int Lane = SVN->getSplatIndex(); 3634 // If this is an undef splat, generate it via "just" vdup, if possible.
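// (A sketch: a splat of lane 0 of (scalar_to_vector X) becomes a plain VDUP of X below, rather than a VDUPLANE.)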
3635 if (Lane == -1) Lane = 0; 3636 3637 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 3638 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 3639 } 3640 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 3641 DAG.getConstant(Lane, MVT::i32)); 3642 } 3643 3644 bool ReverseVEXT; 3645 unsigned Imm; 3646 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 3647 if (ReverseVEXT) 3648 std::swap(V1, V2); 3649 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 3650 DAG.getConstant(Imm, MVT::i32)); 3651 } 3652 3653 if (isVREVMask(ShuffleMask, VT, 64)) 3654 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 3655 if (isVREVMask(ShuffleMask, VT, 32)) 3656 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 3657 if (isVREVMask(ShuffleMask, VT, 16)) 3658 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 3659 3660 // Check for Neon shuffles that modify both input vectors in place. 3661 // If both results are used, i.e., if there are two shuffles with the same 3662 // source operands and with masks corresponding to both results of one of 3663 // these operations, DAG memoization will ensure that a single node is 3664 // used for both shuffles. 3665 unsigned WhichResult; 3666 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 3667 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3668 V1, V2).getValue(WhichResult); 3669 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 3670 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3671 V1, V2).getValue(WhichResult); 3672 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 3673 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3674 V1, V2).getValue(WhichResult); 3675 3676 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3677 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 3678 V1, V1).getValue(WhichResult); 3679 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3680 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 3681 V1, V1).getValue(WhichResult); 3682 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 3683 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 3684 V1, V1).getValue(WhichResult); 3685 } 3686 3687 // If the shuffle is not directly supported and it has 4 elements, use 3688 // the PerfectShuffle-generated table to synthesize it from other shuffles. 3689 unsigned NumElts = VT.getVectorNumElements(); 3690 if (NumElts == 4) { 3691 unsigned PFIndexes[4]; 3692 for (unsigned i = 0; i != 4; ++i) { 3693 if (ShuffleMask[i] < 0) 3694 PFIndexes[i] = 8; 3695 else 3696 PFIndexes[i] = ShuffleMask[i]; 3697 } 3698 3699 // Compute the index in the perfect shuffle table. 3700 unsigned PFTableIndex = 3701 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3702 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3703 unsigned Cost = (PFEntry >> 30); 3704 3705 if (Cost <= 4) 3706 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3707 } 3708 3709 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 3710 if (EltSize >= 32) { 3711 // Do the expansion with floating-point types, since that is what the VFP 3712 // registers are defined to use, and since i64 is not legal. 
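// (Same trick as in LowerBUILD_VECTOR above: e.g., a v2i64 shuffle is rebuilt as an ARMISD::BUILD_VECTOR of f64 element extracts, with undef mask entries becoming undef f64 operands; a sketch of the loop below.)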
3713 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3714 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3715 V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1); 3716 V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2); 3717 SmallVector<SDValue, 8> Ops; 3718 for (unsigned i = 0; i < NumElts; ++i) { 3719 if (ShuffleMask[i] < 0) 3720 Ops.push_back(DAG.getUNDEF(EltVT)); 3721 else 3722 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3723 ShuffleMask[i] < (int)NumElts ? V1 : V2, 3724 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 3725 MVT::i32))); 3726 } 3727 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3728 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); 3729 } 3730 3731 return SDValue(); 3732} 3733 3734static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 3735 EVT VT = Op.getValueType(); 3736 DebugLoc dl = Op.getDebugLoc(); 3737 SDValue Vec = Op.getOperand(0); 3738 SDValue Lane = Op.getOperand(1); 3739 assert(VT == MVT::i32 && 3740 Vec.getValueType().getVectorElementType().getSizeInBits() < 32 && 3741 "unexpected type for custom-lowering vector extract"); 3742 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 3743} 3744 3745static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 3746 // The only time a CONCAT_VECTORS operation can have legal types is when 3747 // two 64-bit vectors are concatenated to a 128-bit vector. 3748 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 3749 "unexpected CONCAT_VECTORS"); 3750 DebugLoc dl = Op.getDebugLoc(); 3751 SDValue Val = DAG.getUNDEF(MVT::v2f64); 3752 SDValue Op0 = Op.getOperand(0); 3753 SDValue Op1 = Op.getOperand(1); 3754 if (Op0.getOpcode() != ISD::UNDEF) 3755 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3756 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0), 3757 DAG.getIntPtrConstant(0)); 3758 if (Op1.getOpcode() != ISD::UNDEF) 3759 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 3760 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1), 3761 DAG.getIntPtrConstant(1)); 3762 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val); 3763} 3764 3765/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or 3766/// an extending load, return the unextended value. 3767static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 3768 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 3769 return N->getOperand(0); 3770 LoadSDNode *LD = cast<LoadSDNode>(N); 3771 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 3772 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 3773 LD->isNonTemporal(), LD->getAlignment()); 3774} 3775 3776static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 3777 // Multiplications are only custom-lowered for 128-bit vectors so that 3778 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
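// For example (a sketch): mul (sext v4i16 X to v4i32), (sext v4i16 Y to v4i32) is matched below and emitted as a single VMULLs on the unextended X and Y; the zero-extended form maps to VMULLu the same way.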
3779 EVT VT = Op.getValueType(); 3780 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 3781 SDNode *N0 = Op.getOperand(0).getNode(); 3782 SDNode *N1 = Op.getOperand(1).getNode(); 3783 unsigned NewOpc = 0; 3784 if ((N0->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N0)) && 3785 (N1->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N1))) { 3786 NewOpc = ARMISD::VMULLs; 3787 } else if ((N0->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N0)) && 3788 (N1->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N1))) { 3789 NewOpc = ARMISD::VMULLu; 3790 } else if (VT.getSimpleVT().SimpleTy == MVT::v2i64) { 3791 // Fall through to expand this. It is not legal. 3792 return SDValue(); 3793 } else { 3794 // Other vector multiplications are legal. 3795 return Op; 3796 } 3797 3798 // Legalize to a VMULL instruction. 3799 DebugLoc DL = Op.getDebugLoc(); 3800 SDValue Op0 = SkipExtension(N0, DAG); 3801 SDValue Op1 = SkipExtension(N1, DAG); 3802 3803 assert(Op0.getValueType().is64BitVector() && 3804 Op1.getValueType().is64BitVector() && 3805 "unexpected types for extended operands to VMULL"); 3806 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 3807} 3808 3809SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 3810 switch (Op.getOpcode()) { 3811 default: llvm_unreachable("Don't know how to custom lower this!"); 3812 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3813 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 3814 case ISD::GlobalAddress: 3815 return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) : 3816 LowerGlobalAddressELF(Op, DAG); 3817 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3818 case ISD::SELECT: return LowerSELECT(Op, DAG); 3819 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 3820 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 3821 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 3822 case ISD::VASTART: return LowerVASTART(Op, DAG); 3823 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 3824 case ISD::SINT_TO_FP: 3825 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 3826 case ISD::FP_TO_SINT: 3827 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 3828 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 3829 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3830 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3831 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 3832 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 3833 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 3834 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 3835 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 3836 Subtarget); 3837 case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG); 3838 case ISD::SHL: 3839 case ISD::SRL: 3840 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 3841 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 3842 case ISD::SRL_PARTS: 3843 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 3844 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 3845 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 3846 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 3847 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3848 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 3849 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 3850 case 
ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 3851 case ISD::MUL: return LowerMUL(Op, DAG); 3852 } 3853 return SDValue(); 3854} 3855 3856/// ReplaceNodeResults - Replace the results of node with an illegal result 3857/// type with new values built out of custom code. 3858void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 3859 SmallVectorImpl<SDValue>&Results, 3860 SelectionDAG &DAG) const { 3861 SDValue Res; 3862 switch (N->getOpcode()) { 3863 default: 3864 llvm_unreachable("Don't know how to custom expand this!"); 3865 break; 3866 case ISD::BIT_CONVERT: 3867 Res = ExpandBIT_CONVERT(N, DAG); 3868 break; 3869 case ISD::SRL: 3870 case ISD::SRA: 3871 Res = LowerShift(N, DAG, Subtarget); 3872 break; 3873 } 3874 if (Res.getNode()) 3875 Results.push_back(Res); 3876} 3877 3878//===----------------------------------------------------------------------===// 3879// ARM Scheduler Hooks 3880//===----------------------------------------------------------------------===// 3881 3882MachineBasicBlock * 3883ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 3884 MachineBasicBlock *BB, 3885 unsigned Size) const { 3886 unsigned dest = MI->getOperand(0).getReg(); 3887 unsigned ptr = MI->getOperand(1).getReg(); 3888 unsigned oldval = MI->getOperand(2).getReg(); 3889 unsigned newval = MI->getOperand(3).getReg(); 3890 unsigned scratch = BB->getParent()->getRegInfo() 3891 .createVirtualRegister(ARM::GPRRegisterClass); 3892 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3893 DebugLoc dl = MI->getDebugLoc(); 3894 bool isThumb2 = Subtarget->isThumb2(); 3895 3896 unsigned ldrOpc, strOpc; 3897 switch (Size) { 3898 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 3899 case 1: 3900 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 3901 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 3902 break; 3903 case 2: 3904 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 3905 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 3906 break; 3907 case 4: 3908 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 3909 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 3910 break; 3911 } 3912 3913 MachineFunction *MF = BB->getParent(); 3914 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3915 MachineFunction::iterator It = BB; 3916 ++It; // insert the new blocks after the current block 3917 3918 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 3919 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 3920 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 3921 MF->insert(It, loop1MBB); 3922 MF->insert(It, loop2MBB); 3923 MF->insert(It, exitMBB); 3924 3925 // Transfer the remainder of BB and its successor edges to exitMBB. 3926 exitMBB->splice(exitMBB->begin(), BB, 3927 llvm::next(MachineBasicBlock::iterator(MI)), 3928 BB->end()); 3929 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 3930 3931 // thisMBB: 3932 // ... 3933 // fallthrough --> loop1MBB 3934 BB->addSuccessor(loop1MBB); 3935 3936 // loop1MBB: 3937 // ldrex dest, [ptr] 3938 // cmp dest, oldval 3939 // bne exitMBB 3940 BB = loop1MBB; 3941 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 3942 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 3943 .addReg(dest).addReg(oldval)); 3944 BuildMI(BB, dl, TII->get(isThumb2 ?
ARM::t2Bcc : ARM::Bcc)) 3945 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 3946 BB->addSuccessor(loop2MBB); 3947 BB->addSuccessor(exitMBB); 3948 3949 // loop2MBB: 3950 // strex scratch, newval, [ptr] 3951 // cmp scratch, #0 3952 // bne loop1MBB 3953 BB = loop2MBB; 3954 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 3955 .addReg(ptr)); 3956 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 3957 .addReg(scratch).addImm(0)); 3958 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 3959 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 3960 BB->addSuccessor(loop1MBB); 3961 BB->addSuccessor(exitMBB); 3962 3963 // exitMBB: 3964 // ... 3965 BB = exitMBB; 3966 3967 MI->eraseFromParent(); // The instruction is gone now. 3968 3969 return BB; 3970} 3971 3972MachineBasicBlock * 3973ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 3974 unsigned Size, unsigned BinOpcode) const { 3975 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 3976 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3977 3978 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3979 MachineFunction *MF = BB->getParent(); 3980 MachineFunction::iterator It = BB; 3981 ++It; 3982 3983 unsigned dest = MI->getOperand(0).getReg(); 3984 unsigned ptr = MI->getOperand(1).getReg(); 3985 unsigned incr = MI->getOperand(2).getReg(); 3986 DebugLoc dl = MI->getDebugLoc(); 3987 3988 bool isThumb2 = Subtarget->isThumb2(); 3989 unsigned ldrOpc, strOpc; 3990 switch (Size) { 3991 default: llvm_unreachable("unsupported size for AtomicBinary!"); 3992 case 1: 3993 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 3994 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 3995 break; 3996 case 2: 3997 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 3998 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 3999 break; 4000 case 4: 4001 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4002 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4003 break; 4004 } 4005 4006 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4007 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4008 MF->insert(It, loopMBB); 4009 MF->insert(It, exitMBB); 4010 4011 // Transfer the remainder of BB and its successor edges to exitMBB. 4012 exitMBB->splice(exitMBB->begin(), BB, 4013 llvm::next(MachineBasicBlock::iterator(MI)), 4014 BB->end()); 4015 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4016 4017 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4018 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4019 unsigned scratch2 = (!BinOpcode) ? incr : 4020 RegInfo.createVirtualRegister(ARM::GPRRegisterClass); 4021 4022 // thisMBB: 4023 // ... 4024 // fallthrough --> loopMBB 4025 BB->addSuccessor(loopMBB); 4026 4027 // loopMBB: 4028 // ldrex dest, ptr 4029 // <binop> scratch2, dest, incr 4030 // strex scratch, scratch2, ptr 4031 // cmp scratch, #0 4032 // bne- loopMBB 4033 // fallthrough --> exitMBB 4034 BB = loopMBB; 4035 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4036 if (BinOpcode) { 4037 // operand order needs to go the other way for NAND 4038 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 4039 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 4040 addReg(incr).addReg(dest)).addReg(0); 4041 else 4042 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4043 addReg(dest).addReg(incr)).addReg(0); 4044 } 4045 4046 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2) 4047 .addReg(ptr)); 4048 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4049 .addReg(scratch).addImm(0)); 4050 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4051 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4052 4053 BB->addSuccessor(loopMBB); 4054 BB->addSuccessor(exitMBB); 4055 4056 // exitMBB: 4057 // ... 4058 BB = exitMBB; 4059 4060 MI->eraseFromParent(); // The instruction is gone now. 4061 4062 return BB; 4063} 4064 4065static 4066MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4067 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4068 E = MBB->succ_end(); I != E; ++I) 4069 if (*I != Succ) 4070 return *I; 4071 llvm_unreachable("Expecting a BB with two successors!"); 4072} 4073 4074MachineBasicBlock * 4075ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4076 MachineBasicBlock *BB) const { 4077 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4078 DebugLoc dl = MI->getDebugLoc(); 4079 bool isThumb2 = Subtarget->isThumb2(); 4080 switch (MI->getOpcode()) { 4081 default: 4082 MI->dump(); 4083 llvm_unreachable("Unexpected instr type to insert"); 4084 4085 case ARM::ATOMIC_LOAD_ADD_I8: 4086 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4087 case ARM::ATOMIC_LOAD_ADD_I16: 4088 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4089 case ARM::ATOMIC_LOAD_ADD_I32: 4090 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4091 4092 case ARM::ATOMIC_LOAD_AND_I8: 4093 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4094 case ARM::ATOMIC_LOAD_AND_I16: 4095 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4096 case ARM::ATOMIC_LOAD_AND_I32: 4097 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4098 4099 case ARM::ATOMIC_LOAD_OR_I8: 4100 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4101 case ARM::ATOMIC_LOAD_OR_I16: 4102 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4103 case ARM::ATOMIC_LOAD_OR_I32: 4104 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4105 4106 case ARM::ATOMIC_LOAD_XOR_I8: 4107 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4108 case ARM::ATOMIC_LOAD_XOR_I16: 4109 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4110 case ARM::ATOMIC_LOAD_XOR_I32: 4111 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 4112 4113 case ARM::ATOMIC_LOAD_NAND_I8: 4114 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4115 case ARM::ATOMIC_LOAD_NAND_I16: 4116 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4117 case ARM::ATOMIC_LOAD_NAND_I32: 4118 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 4119 4120 case ARM::ATOMIC_LOAD_SUB_I8: 4121 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4122 case ARM::ATOMIC_LOAD_SUB_I16: 4123 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 4124 case ARM::ATOMIC_LOAD_SUB_I32: 4125 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 4126 4127 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 4128 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 4129 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 4130 4131 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 4132 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 4133 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 4134 4135 case ARM::tMOVCCr_pseudo: { 4136 // To "insert" a SELECT_CC instruction, we actually have to insert the 4137 // diamond control-flow pattern. The incoming instruction knows the 4138 // destination vreg to set, the condition code register to branch on, the 4139 // true/false values to select between, and a branch opcode to use. 4140 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4141 MachineFunction::iterator It = BB; 4142 ++It; 4143 4144 // thisMBB: 4145 // ... 4146 // TrueVal = ... 4147 // cmpTY ccX, r1, r2 4148 // bCC copy1MBB 4149 // fallthrough --> copy0MBB 4150 MachineBasicBlock *thisMBB = BB; 4151 MachineFunction *F = BB->getParent(); 4152 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4153 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4154 F->insert(It, copy0MBB); 4155 F->insert(It, sinkMBB); 4156 4157 // Transfer the remainder of BB and its successor edges to sinkMBB. 4158 sinkMBB->splice(sinkMBB->begin(), BB, 4159 llvm::next(MachineBasicBlock::iterator(MI)), 4160 BB->end()); 4161 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 4162 4163 BB->addSuccessor(copy0MBB); 4164 BB->addSuccessor(sinkMBB); 4165 4166 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 4167 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 4168 4169 // copy0MBB: 4170 // %FalseValue = ... 4171 // # fallthrough to sinkMBB 4172 BB = copy0MBB; 4173 4174 // Update machine-CFG edges 4175 BB->addSuccessor(sinkMBB); 4176 4177 // sinkMBB: 4178 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4179 // ... 4180 BB = sinkMBB; 4181 BuildMI(*BB, BB->begin(), dl, 4182 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 4183 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 4184 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4185 4186 MI->eraseFromParent(); // The pseudo instruction is gone now. 4187 return BB; 4188 } 4189 4190 case ARM::BCCi64: 4191 case ARM::BCCZi64: { 4192 // Compare both parts that make up the double comparison separately for 4193 // equality. 4194 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 4195 4196 unsigned LHS1 = MI->getOperand(1).getReg(); 4197 unsigned LHS2 = MI->getOperand(2).getReg(); 4198 if (RHSisZero) { 4199 AddDefaultPred(BuildMI(BB, dl, 4200 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4201 .addReg(LHS1).addImm(0)); 4202 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 4203 .addReg(LHS2).addImm(0) 4204 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4205 } else { 4206 unsigned RHS1 = MI->getOperand(3).getReg(); 4207 unsigned RHS2 = MI->getOperand(4).getReg(); 4208 AddDefaultPred(BuildMI(BB, dl, 4209 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4210 .addReg(LHS1).addReg(RHS1)); 4211 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4212 .addReg(LHS2).addReg(RHS2) 4213 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 4214 } 4215 4216 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 
3 : 5).getMBB(); 4217 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 4218 if (MI->getOperand(0).getImm() == ARMCC::NE) 4219 std::swap(destMBB, exitMBB); 4220 4221 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4222 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 4223 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B)) 4224 .addMBB(exitMBB); 4225 4226 MI->eraseFromParent(); // The pseudo instruction is gone now. 4227 return BB; 4228 } 4229 } 4230} 4231 4232//===----------------------------------------------------------------------===// 4233// ARM Optimization Hooks 4234//===----------------------------------------------------------------------===// 4235 4236static 4237SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 4238 TargetLowering::DAGCombinerInfo &DCI) { 4239 SelectionDAG &DAG = DCI.DAG; 4240 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4241 EVT VT = N->getValueType(0); 4242 unsigned Opc = N->getOpcode(); 4243 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; 4244 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); 4245 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); 4246 ISD::CondCode CC = ISD::SETCC_INVALID; 4247 4248 if (isSlctCC) { 4249 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); 4250 } else { 4251 SDValue CCOp = Slct.getOperand(0); 4252 if (CCOp.getOpcode() == ISD::SETCC) 4253 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); 4254 } 4255 4256 bool DoXform = false; 4257 bool InvCC = false; 4258 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && 4259 "Bad input!"); 4260 4261 if (LHS.getOpcode() == ISD::Constant && 4262 cast<ConstantSDNode>(LHS)->isNullValue()) { 4263 DoXform = true; 4264 } else if (CC != ISD::SETCC_INVALID && 4265 RHS.getOpcode() == ISD::Constant && 4266 cast<ConstantSDNode>(RHS)->isNullValue()) { 4267 std::swap(LHS, RHS); 4268 SDValue Op0 = Slct.getOperand(0); 4269 EVT OpVT = isSlctCC ? Op0.getValueType() : 4270 Op0.getOperand(0).getValueType(); 4271 bool isInt = OpVT.isInteger(); 4272 CC = ISD::getSetCCInverse(CC, isInt); 4273 4274 if (!TLI.isCondCodeLegal(CC, OpVT)) 4275 return SDValue(); // Inverse operator isn't legal. 4276 4277 DoXform = true; 4278 InvCC = true; 4279 } 4280 4281 if (DoXform) { 4282 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); 4283 if (isSlctCC) 4284 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, 4285 Slct.getOperand(0), Slct.getOperand(1), CC); 4286 SDValue CCOp = Slct.getOperand(0); 4287 if (InvCC) 4288 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), 4289 CCOp.getOperand(0), CCOp.getOperand(1), CC); 4290 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4291 CCOp, OtherOp, Result); 4292 } 4293 return SDValue(); 4294} 4295 4296/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 4297/// operands N0 and N1. This is a helper for PerformADDCombine that is 4298/// called with the default operands, and if that fails, with commuted 4299/// operands. 4300static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 4301 TargetLowering::DAGCombinerInfo &DCI) { 4302 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 4303 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { 4304 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 4305 if (Result.getNode()) return Result; 4306 } 4307 return SDValue(); 4308} 4309 4310/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 
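/// For example (a sketch): (add (select cc, 0, c), x) -> (select cc, x, (add x, c)), so the add is only performed on the path that needs it, and the select can later become a predicated add.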
4311/// 4312static SDValue PerformADDCombine(SDNode *N, 4313 TargetLowering::DAGCombinerInfo &DCI) { 4314 SDValue N0 = N->getOperand(0); 4315 SDValue N1 = N->getOperand(1); 4316 4317 // First try with the default operand order. 4318 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI); 4319 if (Result.getNode()) 4320 return Result; 4321 4322 // If that didn't work, try again with the operands commuted. 4323 return PerformADDCombineWithOperands(N, N1, N0, DCI); 4324} 4325 4326/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 4327/// 4328static SDValue PerformSUBCombine(SDNode *N, 4329 TargetLowering::DAGCombinerInfo &DCI) { 4330 SDValue N0 = N->getOperand(0); 4331 SDValue N1 = N->getOperand(1); 4332 4333 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 4334 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { 4335 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 4336 if (Result.getNode()) return Result; 4337 } 4338 4339 return SDValue(); 4340} 4341 4342static SDValue PerformMULCombine(SDNode *N, 4343 TargetLowering::DAGCombinerInfo &DCI, 4344 const ARMSubtarget *Subtarget) { 4345 SelectionDAG &DAG = DCI.DAG; 4346 4347 if (Subtarget->isThumb1Only()) 4348 return SDValue(); 4349 4350 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 4351 return SDValue(); 4352 4353 EVT VT = N->getValueType(0); 4354 if (VT != MVT::i32) 4355 return SDValue(); 4356 4357 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4358 if (!C) 4359 return SDValue(); 4360 4361 uint64_t MulAmt = C->getZExtValue(); 4362 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 4363 ShiftAmt = ShiftAmt & (32 - 1); 4364 SDValue V = N->getOperand(0); 4365 DebugLoc DL = N->getDebugLoc(); 4366 4367 SDValue Res; 4368 MulAmt >>= ShiftAmt; 4369 if (isPowerOf2_32(MulAmt - 1)) { 4370 // (mul x, 2^N + 1) => (add (shl x, N), x) 4371 Res = DAG.getNode(ISD::ADD, DL, VT, 4372 V, DAG.getNode(ISD::SHL, DL, VT, 4373 V, DAG.getConstant(Log2_32(MulAmt-1), 4374 MVT::i32))); 4375 } else if (isPowerOf2_32(MulAmt + 1)) { 4376 // (mul x, 2^N - 1) => (sub (shl x, N), x) 4377 Res = DAG.getNode(ISD::SUB, DL, VT, 4378 DAG.getNode(ISD::SHL, DL, VT, 4379 V, DAG.getConstant(Log2_32(MulAmt+1), 4380 MVT::i32)), 4381 V); 4382 } else 4383 return SDValue(); 4384 4385 if (ShiftAmt != 0) 4386 Res = DAG.getNode(ISD::SHL, DL, VT, Res, 4387 DAG.getConstant(ShiftAmt, MVT::i32)); 4388 4389 // Do not add new nodes to DAG combiner worklist. 4390 DCI.CombineTo(N, Res, false); 4391 return SDValue(); 4392} 4393 4394/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 4395static SDValue PerformORCombine(SDNode *N, 4396 TargetLowering::DAGCombinerInfo &DCI, 4397 const ARMSubtarget *Subtarget) { 4398 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 4399 // reasonable. 
4400 4401 // BFI is only available on V6T2+ 4402 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 4403 return SDValue(); 4404 4405 SelectionDAG &DAG = DCI.DAG; 4406 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 4407 DebugLoc DL = N->getDebugLoc(); 4408 // 1) or (and A, mask), val => ARMbfi A, val, mask 4409 // iff (val & ~mask) == val 4410 // 4411 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4412 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 4413 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4414 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 4415 // && CountPopulation_32(mask) == CountPopulation_32(~mask2) 4416 // (i.e., copy a bitfield value into another bitfield of the same width) 4417 if (N0.getOpcode() != ISD::AND) 4418 return SDValue(); 4419 4420 EVT VT = N->getValueType(0); 4421 if (VT != MVT::i32) 4422 return SDValue(); 4423 4424 4425 // The value and the mask need to be constants so we can verify this is 4426 // actually a bitfield set. If the mask is 0xffff, we can do better 4427 // via a movt instruction, so don't use BFI in that case. 4428 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 4429 if (!C) 4430 return SDValue(); 4431 unsigned Mask = C->getZExtValue(); 4432 if (Mask == 0xffff) 4433 return SDValue(); 4434 SDValue Res; 4435 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 4436 if ((C = dyn_cast<ConstantSDNode>(N1))) { 4437 unsigned Val = C->getZExtValue(); 4438 if (!ARM::isBitFieldInvertedMask(Mask) || (Val & ~Mask) != Val) 4439 return SDValue(); 4440 Val >>= CountTrailingZeros_32(~Mask); 4441 4442 Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), 4443 DAG.getConstant(Val, MVT::i32), 4444 DAG.getConstant(Mask, MVT::i32)); 4445 4446 // Do not add new nodes to DAG combiner worklist. 4447 DCI.CombineTo(N, Res, false); 4448 } else if (N1.getOpcode() == ISD::AND) { 4449 // Case (2): or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 4450 C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 4451 if (!C) 4452 return SDValue(); 4453 unsigned Mask2 = C->getZExtValue(); 4454 4455 if (ARM::isBitFieldInvertedMask(Mask) && 4456 ARM::isBitFieldInvertedMask(~Mask2) && 4457 (CountPopulation_32(Mask) == CountPopulation_32(~Mask2))) { 4458 // The pack halfword instruction works better for masks that fit it, 4459 // so use that when it's available. 4460 if (Subtarget->hasT2ExtractPack() && 4461 (Mask == 0xffff || Mask == 0xffff0000)) 4462 return SDValue(); 4463 // 2a 4464 unsigned lsb = CountTrailingZeros_32(Mask2); 4465 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 4466 DAG.getConstant(lsb, MVT::i32)); 4467 Res = DAG.getNode(ARMISD::BFI, DL, VT, N0.getOperand(0), Res, 4468 DAG.getConstant(Mask, MVT::i32)); 4469 // Do not add new nodes to DAG combiner worklist. 4470 DCI.CombineTo(N, Res, false); 4471 } else if (ARM::isBitFieldInvertedMask(~Mask) && 4472 ARM::isBitFieldInvertedMask(Mask2) && 4473 (CountPopulation_32(~Mask) == CountPopulation_32(Mask2))) { 4474 // The pack halfword instruction works better for masks that fit it, 4475 // so use that when it's available.
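      // Otherwise, e.g. (illustrative): or (and A, 0x0000ff00), (and B, 0xffff00ff)
      // becomes ARMbfi B, (lsr A, 8), 0xffff00ff, copying A[15:8] into B[15:8].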
4476 if (Subtarget->hasT2ExtractPack() && 4477 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 4478 return SDValue(); 4479 // 2b 4480 unsigned lsb = CountTrailingZeros_32(Mask); 4481 Res = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), 4482 DAG.getConstant(lsb, MVT::i32)); 4483 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 4484 DAG.getConstant(Mask2, MVT::i32)); 4485 // Do not add new nodes to DAG combiner worklist. 4486 DCI.CombineTo(N, Res, false); 4487 } 4488 } 4489 4490 return SDValue(); 4491} 4492 4493/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 4494/// ARMISD::VMOVRRD. 4495static SDValue PerformVMOVRRDCombine(SDNode *N, 4496 TargetLowering::DAGCombinerInfo &DCI) { 4497 // vmovrrd(vmovdrr x, y) -> x,y 4498 SDValue InDouble = N->getOperand(0); 4499 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 4500 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 4501 return SDValue(); 4502} 4503 4504/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 4505/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 4506static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 4507 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 4508 SDValue Op0 = N->getOperand(0); 4509 SDValue Op1 = N->getOperand(1); 4510 if (Op0.getOpcode() == ISD::BIT_CONVERT) 4511 Op0 = Op0.getOperand(0); 4512 if (Op1.getOpcode() == ISD::BIT_CONVERT) 4513 Op1 = Op1.getOperand(0); 4514 if (Op0.getOpcode() == ARMISD::VMOVRRD && 4515 Op0.getNode() == Op1.getNode() && 4516 Op0.getResNo() == 0 && Op1.getResNo() == 1) 4517 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), 4518 N->getValueType(0), Op0.getOperand(0)); 4519 return SDValue(); 4520} 4521 4522/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 4523/// ISD::BUILD_VECTOR. 4524static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG) { 4525 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 4526 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 4527 // into a pair of GPRs, which is fine when the value is used as a scalar, 4528 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 4529 if (N->getNumOperands() == 2) 4530 return PerformVMOVDRRCombine(N, DAG); 4531 4532 return SDValue(); 4533} 4534 4535/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 4536/// ISD::VECTOR_SHUFFLE. 4537static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 4538 // The LLVM shufflevector instruction does not require the shuffle mask 4539 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 4540 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 4541 // operands do not match the mask length, they are extended by concatenating 4542 // them with undef vectors. That is probably the right thing for other 4543 // targets, but for NEON it is better to concatenate two double-register 4544 // size vector operands into a single quad-register size vector. 
Do that 4545 // transformation here: 4546 // shuffle(concat(v1, undef), concat(v2, undef)) -> 4547 // shuffle(concat(v1, v2), undef) 4548 SDValue Op0 = N->getOperand(0); 4549 SDValue Op1 = N->getOperand(1); 4550 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 4551 Op1.getOpcode() != ISD::CONCAT_VECTORS || 4552 Op0.getNumOperands() != 2 || 4553 Op1.getNumOperands() != 2) 4554 return SDValue(); 4555 SDValue Concat0Op1 = Op0.getOperand(1); 4556 SDValue Concat1Op1 = Op1.getOperand(1); 4557 if (Concat0Op1.getOpcode() != ISD::UNDEF || 4558 Concat1Op1.getOpcode() != ISD::UNDEF) 4559 return SDValue(); 4560 // Skip the transformation if any of the types are illegal. 4561 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4562 EVT VT = N->getValueType(0); 4563 if (!TLI.isTypeLegal(VT) || 4564 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 4565 !TLI.isTypeLegal(Concat1Op1.getValueType())) 4566 return SDValue(); 4567 4568 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 4569 Op0.getOperand(0), Op1.getOperand(0)); 4570 // Translate the shuffle mask. 4571 SmallVector<int, 16> NewMask; 4572 unsigned NumElts = VT.getVectorNumElements(); 4573 unsigned HalfElts = NumElts/2; 4574 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 4575 for (unsigned n = 0; n < NumElts; ++n) { 4576 int MaskElt = SVN->getMaskElt(n); 4577 int NewElt = -1; 4578 if (MaskElt < (int)HalfElts) 4579 NewElt = MaskElt; 4580 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 4581 NewElt = HalfElts + MaskElt - NumElts; 4582 NewMask.push_back(NewElt); 4583 } 4584 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 4585 DAG.getUNDEF(VT), NewMask.data()); 4586} 4587 4588/// PerformVDUPLANECombine - Target-specific dag combine xforms for 4589/// ARMISD::VDUPLANE. 4590static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) { 4591 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 4592 // redundant. 4593 SDValue Op = N->getOperand(0); 4594 EVT VT = N->getValueType(0); 4595 4596 // Ignore bit_converts. 4597 while (Op.getOpcode() == ISD::BIT_CONVERT) 4598 Op = Op.getOperand(0); 4599 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 4600 return SDValue(); 4601 4602 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 4603 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 4604 // The canonical VMOV for a zero vector uses a 32-bit element size. 4605 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4606 unsigned EltBits; 4607 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 4608 EltSize = 8; 4609 if (EltSize > VT.getVectorElementType().getSizeInBits()) 4610 return SDValue(); 4611 4612 return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op); 4613} 4614 4615/// getVShiftImm - Check if this is a valid build_vector for the immediate 4616/// operand of a vector shift operation, where all the elements of the 4617/// build_vector must have the same constant integer value. 4618static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 4619 // Ignore bit_converts. 4620 while (Op.getOpcode() == ISD::BIT_CONVERT) 4621 Op = Op.getOperand(0); 4622 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 4623 APInt SplatBits, SplatUndef; 4624 unsigned SplatBitSize; 4625 bool HasAnyUndefs; 4626 if (! BVN || ! 
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 4627 HasAnyUndefs, ElementBits) || 4628 SplatBitSize > ElementBits) 4629 return false; 4630 Cnt = SplatBits.getSExtValue(); 4631 return true; 4632} 4633 4634/// isVShiftLImm - Check if this is a valid build_vector for the immediate 4635/// operand of a vector shift left operation. That value must be in the range: 4636/// 0 <= Value < ElementBits for a left shift; or 4637/// 0 <= Value <= ElementBits for a long left shift. 4638static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 4639 assert(VT.isVector() && "vector shift count is not a vector type"); 4640 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 4641 if (! getVShiftImm(Op, ElementBits, Cnt)) 4642 return false; 4643 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 4644} 4645 4646/// isVShiftRImm - Check if this is a valid build_vector for the immediate 4647/// operand of a vector shift right operation. For a shift opcode, the value 4648/// is positive, but for an intrinsic the value count must be negative. The 4649/// absolute value must be in the range: 4650/// 1 <= |Value| <= ElementBits for a right shift; or 4651/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 4652static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 4653 int64_t &Cnt) { 4654 assert(VT.isVector() && "vector shift count is not a vector type"); 4655 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 4656 if (! getVShiftImm(Op, ElementBits, Cnt)) 4657 return false; 4658 if (isIntrinsic) 4659 Cnt = -Cnt; 4660 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 4661} 4662 4663/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 4664static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 4665 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 4666 switch (IntNo) { 4667 default: 4668 // Don't do anything for most intrinsics. 4669 break; 4670 4671 // Vector shifts: check for immediate versions and lower them. 4672 // Note: This is done during DAG combining instead of DAG legalizing because 4673 // the build_vectors for 64-bit vector element shift counts are generally 4674 // not legal, and it is hard to see their values after they get legalized to 4675 // loads from a constant pool. 4676 case Intrinsic::arm_neon_vshifts: 4677 case Intrinsic::arm_neon_vshiftu: 4678 case Intrinsic::arm_neon_vshiftls: 4679 case Intrinsic::arm_neon_vshiftlu: 4680 case Intrinsic::arm_neon_vshiftn: 4681 case Intrinsic::arm_neon_vrshifts: 4682 case Intrinsic::arm_neon_vrshiftu: 4683 case Intrinsic::arm_neon_vrshiftn: 4684 case Intrinsic::arm_neon_vqshifts: 4685 case Intrinsic::arm_neon_vqshiftu: 4686 case Intrinsic::arm_neon_vqshiftsu: 4687 case Intrinsic::arm_neon_vqshiftns: 4688 case Intrinsic::arm_neon_vqshiftnu: 4689 case Intrinsic::arm_neon_vqshiftnsu: 4690 case Intrinsic::arm_neon_vqrshiftns: 4691 case Intrinsic::arm_neon_vqrshiftnu: 4692 case Intrinsic::arm_neon_vqrshiftnsu: { 4693 EVT VT = N->getOperand(1).getValueType(); 4694 int64_t Cnt; 4695 unsigned VShiftOpc = 0; 4696 4697 switch (IntNo) { 4698 case Intrinsic::arm_neon_vshifts: 4699 case Intrinsic::arm_neon_vshiftu: 4700 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 4701 VShiftOpc = ARMISD::VSHL; 4702 break; 4703 } 4704 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 4705 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
4706 ARMISD::VSHRs : ARMISD::VSHRu); 4707 break; 4708 } 4709 return SDValue(); 4710 4711 case Intrinsic::arm_neon_vshiftls: 4712 case Intrinsic::arm_neon_vshiftlu: 4713 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 4714 break; 4715 llvm_unreachable("invalid shift count for vshll intrinsic"); 4716 4717 case Intrinsic::arm_neon_vrshifts: 4718 case Intrinsic::arm_neon_vrshiftu: 4719 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 4720 break; 4721 return SDValue(); 4722 4723 case Intrinsic::arm_neon_vqshifts: 4724 case Intrinsic::arm_neon_vqshiftu: 4725 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 4726 break; 4727 return SDValue(); 4728 4729 case Intrinsic::arm_neon_vqshiftsu: 4730 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 4731 break; 4732 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 4733 4734 case Intrinsic::arm_neon_vshiftn: 4735 case Intrinsic::arm_neon_vrshiftn: 4736 case Intrinsic::arm_neon_vqshiftns: 4737 case Intrinsic::arm_neon_vqshiftnu: 4738 case Intrinsic::arm_neon_vqshiftnsu: 4739 case Intrinsic::arm_neon_vqrshiftns: 4740 case Intrinsic::arm_neon_vqrshiftnu: 4741 case Intrinsic::arm_neon_vqrshiftnsu: 4742 // Narrowing shifts require an immediate right shift. 4743 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 4744 break; 4745 llvm_unreachable("invalid shift count for narrowing vector shift " 4746 "intrinsic"); 4747 4748 default: 4749 llvm_unreachable("unhandled vector shift"); 4750 } 4751 4752 switch (IntNo) { 4753 case Intrinsic::arm_neon_vshifts: 4754 case Intrinsic::arm_neon_vshiftu: 4755 // Opcode already set above. 4756 break; 4757 case Intrinsic::arm_neon_vshiftls: 4758 case Intrinsic::arm_neon_vshiftlu: 4759 if (Cnt == VT.getVectorElementType().getSizeInBits()) 4760 VShiftOpc = ARMISD::VSHLLi; 4761 else 4762 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
4763 ARMISD::VSHLLs : ARMISD::VSHLLu); 4764 break; 4765 case Intrinsic::arm_neon_vshiftn: 4766 VShiftOpc = ARMISD::VSHRN; break; 4767 case Intrinsic::arm_neon_vrshifts: 4768 VShiftOpc = ARMISD::VRSHRs; break; 4769 case Intrinsic::arm_neon_vrshiftu: 4770 VShiftOpc = ARMISD::VRSHRu; break; 4771 case Intrinsic::arm_neon_vrshiftn: 4772 VShiftOpc = ARMISD::VRSHRN; break; 4773 case Intrinsic::arm_neon_vqshifts: 4774 VShiftOpc = ARMISD::VQSHLs; break; 4775 case Intrinsic::arm_neon_vqshiftu: 4776 VShiftOpc = ARMISD::VQSHLu; break; 4777 case Intrinsic::arm_neon_vqshiftsu: 4778 VShiftOpc = ARMISD::VQSHLsu; break; 4779 case Intrinsic::arm_neon_vqshiftns: 4780 VShiftOpc = ARMISD::VQSHRNs; break; 4781 case Intrinsic::arm_neon_vqshiftnu: 4782 VShiftOpc = ARMISD::VQSHRNu; break; 4783 case Intrinsic::arm_neon_vqshiftnsu: 4784 VShiftOpc = ARMISD::VQSHRNsu; break; 4785 case Intrinsic::arm_neon_vqrshiftns: 4786 VShiftOpc = ARMISD::VQRSHRNs; break; 4787 case Intrinsic::arm_neon_vqrshiftnu: 4788 VShiftOpc = ARMISD::VQRSHRNu; break; 4789 case Intrinsic::arm_neon_vqrshiftnsu: 4790 VShiftOpc = ARMISD::VQRSHRNsu; break; 4791 } 4792 4793 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 4794 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 4795 } 4796 4797 case Intrinsic::arm_neon_vshiftins: { 4798 EVT VT = N->getOperand(1).getValueType(); 4799 int64_t Cnt; 4800 unsigned VShiftOpc = 0; 4801 4802 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 4803 VShiftOpc = ARMISD::VSLI; 4804 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 4805 VShiftOpc = ARMISD::VSRI; 4806 else { 4807 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 4808 } 4809 4810 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 4811 N->getOperand(1), N->getOperand(2), 4812 DAG.getConstant(Cnt, MVT::i32)); 4813 } 4814 4815 case Intrinsic::arm_neon_vqrshifts: 4816 case Intrinsic::arm_neon_vqrshiftu: 4817 // No immediate versions of these to check for. 4818 break; 4819 } 4820 4821 return SDValue(); 4822} 4823 4824/// PerformShiftCombine - Checks for immediate versions of vector shifts and 4825/// lowers them. As with the vector shift intrinsics, this is done during DAG 4826/// combining instead of DAG legalizing because the build_vectors for 64-bit 4827/// vector element shift counts are generally not legal, and it is hard to see 4828/// their values after they get legalized to loads from a constant pool. 4829static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 4830 const ARMSubtarget *ST) { 4831 EVT VT = N->getValueType(0); 4832 4833 // Nothing to be done for scalar shifts. 4834 if (! VT.isVector()) 4835 return SDValue(); 4836 4837 assert(ST->hasNEON() && "unexpected vector shift"); 4838 int64_t Cnt; 4839 4840 switch (N->getOpcode()) { 4841 default: llvm_unreachable("unexpected shift opcode"); 4842 4843 case ISD::SHL: 4844 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 4845 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 4846 DAG.getConstant(Cnt, MVT::i32)); 4847 break; 4848 4849 case ISD::SRA: 4850 case ISD::SRL: 4851 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 4852 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
4853 ARMISD::VSHRs : ARMISD::VSHRu); 4854 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 4855 DAG.getConstant(Cnt, MVT::i32)); 4856 } 4857 } 4858 return SDValue(); 4859} 4860 4861/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 4862/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 4863static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 4864 const ARMSubtarget *ST) { 4865 SDValue N0 = N->getOperand(0); 4866 4867 // Check for sign- and zero-extensions of vector extract operations of 8- 4868 // and 16-bit vector elements. NEON supports these directly. They are 4869 // handled during DAG combining because type legalization will promote them 4870 // to 32-bit types and it is messy to recognize the operations after that. 4871 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 4872 SDValue Vec = N0.getOperand(0); 4873 SDValue Lane = N0.getOperand(1); 4874 EVT VT = N->getValueType(0); 4875 EVT EltVT = N0.getValueType(); 4876 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4877 4878 if (VT == MVT::i32 && 4879 (EltVT == MVT::i8 || EltVT == MVT::i16) && 4880 TLI.isTypeLegal(Vec.getValueType())) { 4881 4882 unsigned Opc = 0; 4883 switch (N->getOpcode()) { 4884 default: llvm_unreachable("unexpected opcode"); 4885 case ISD::SIGN_EXTEND: 4886 Opc = ARMISD::VGETLANEs; 4887 break; 4888 case ISD::ZERO_EXTEND: 4889 case ISD::ANY_EXTEND: 4890 Opc = ARMISD::VGETLANEu; 4891 break; 4892 } 4893 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 4894 } 4895 } 4896 4897 return SDValue(); 4898} 4899 4900/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 4901/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 4902static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 4903 const ARMSubtarget *ST) { 4904 // If the target supports NEON, try to use vmax/vmin instructions for f32 4905 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 4906 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 4907 // a NaN; only do the transformation when it matches that behavior. 4908 4909 // For now only do this when using NEON for FP operations; if using VFP, it 4910 // is not obvious that the benefit outweighs the cost of switching to the 4911 // NEON pipeline. 4912 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 4913 N->getValueType(0) != MVT::f32) 4914 return SDValue(); 4915 4916 SDValue CondLHS = N->getOperand(0); 4917 SDValue CondRHS = N->getOperand(1); 4918 SDValue LHS = N->getOperand(2); 4919 SDValue RHS = N->getOperand(3); 4920 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 4921 4922 unsigned Opcode = 0; 4923 bool IsReversed; 4924 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 4925 IsReversed = false; // x CC y ? x : y 4926 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 4927 IsReversed = true ; // x CC y ? y : x 4928 } else { 4929 return SDValue(); 4930 } 4931 4932 bool IsUnordered; 4933 switch (CC) { 4934 default: break; 4935 case ISD::SETOLT: 4936 case ISD::SETOLE: 4937 case ISD::SETLT: 4938 case ISD::SETLE: 4939 case ISD::SETULT: 4940 case ISD::SETULE: 4941 // If LHS is NaN, an ordered comparison will be false and the result will 4942 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 4943 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
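    // For example, for "x ult y ? x : y", if y is NaN the comparison is true
    // and the select yields x, but vmin(x, NaN) is NaN; so y must be known
    // to be non-NaN before vmin can be used.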
4944 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 4945 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 4946 break; 4947 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 4948 // will return -0, so vmin can only be used for unsafe math or if one of 4949 // the operands is known to be nonzero. 4950 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 4951 !UnsafeFPMath && 4952 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 4953 break; 4954 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 4955 break; 4956 4957 case ISD::SETOGT: 4958 case ISD::SETOGE: 4959 case ISD::SETGT: 4960 case ISD::SETGE: 4961 case ISD::SETUGT: 4962 case ISD::SETUGE: 4963 // If LHS is NaN, an ordered comparison will be false and the result will 4964 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 4965 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 4966 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 4967 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 4968 break; 4969 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 4970 // will return +0, so vmax can only be used for unsafe math or if one of 4971 // the operands is known to be nonzero. 4972 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 4973 !UnsafeFPMath && 4974 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 4975 break; 4976 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 4977 break; 4978 } 4979 4980 if (!Opcode) 4981 return SDValue(); 4982 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 4983} 4984 4985SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 4986 DAGCombinerInfo &DCI) const { 4987 switch (N->getOpcode()) { 4988 default: break; 4989 case ISD::ADD: return PerformADDCombine(N, DCI); 4990 case ISD::SUB: return PerformSUBCombine(N, DCI); 4991 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 4992 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 4993 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 4994 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 4995 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI.DAG); 4996 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 4997 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI.DAG); 4998 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 4999 case ISD::SHL: 5000 case ISD::SRA: 5001 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 5002 case ISD::SIGN_EXTEND: 5003 case ISD::ZERO_EXTEND: 5004 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 5005 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 5006 } 5007 return SDValue(); 5008} 5009 5010bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 5011 if (!Subtarget->allowsUnalignedMem()) 5012 return false; 5013 5014 switch (VT.getSimpleVT().SimpleTy) { 5015 default: 5016 return false; 5017 case MVT::i8: 5018 case MVT::i16: 5019 case MVT::i32: 5020 return true; 5021 // FIXME: VLD1 etc with standard alignment is legal. 
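  // Floating-point and vector types fall through to the default: VFP loads
  // and stores are assumed to require natural alignment, and NEON alignment
  // support is not modeled here yet.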
5022 } 5023} 5024 5025static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 5026 if (V < 0) 5027 return false; 5028 5029 unsigned Scale = 1; 5030 switch (VT.getSimpleVT().SimpleTy) { 5031 default: return false; 5032 case MVT::i1: 5033 case MVT::i8: 5034 // Scale == 1; 5035 break; 5036 case MVT::i16: 5037 // Scale == 2; 5038 Scale = 2; 5039 break; 5040 case MVT::i32: 5041 // Scale == 4; 5042 Scale = 4; 5043 break; 5044 } 5045 5046 if ((V & (Scale - 1)) != 0) 5047 return false; 5048 V /= Scale; 5049 return V == (V & ((1LL << 5) - 1)); 5050} 5051 5052static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 5053 const ARMSubtarget *Subtarget) { 5054 bool isNeg = false; 5055 if (V < 0) { 5056 isNeg = true; 5057 V = - V; 5058 } 5059 5060 switch (VT.getSimpleVT().SimpleTy) { 5061 default: return false; 5062 case MVT::i1: 5063 case MVT::i8: 5064 case MVT::i16: 5065 case MVT::i32: 5066 // + imm12 or - imm8 5067 if (isNeg) 5068 return V == (V & ((1LL << 8) - 1)); 5069 return V == (V & ((1LL << 12) - 1)); 5070 case MVT::f32: 5071 case MVT::f64: 5072 // Same as ARM mode. FIXME: NEON? 5073 if (!Subtarget->hasVFP2()) 5074 return false; 5075 if ((V & 3) != 0) 5076 return false; 5077 V >>= 2; 5078 return V == (V & ((1LL << 8) - 1)); 5079 } 5080} 5081 5082/// isLegalAddressImmediate - Return true if the integer value can be used 5083/// as the offset of the target addressing mode for load / store of the 5084/// given type. 5085static bool isLegalAddressImmediate(int64_t V, EVT VT, 5086 const ARMSubtarget *Subtarget) { 5087 if (V == 0) 5088 return true; 5089 5090 if (!VT.isSimple()) 5091 return false; 5092 5093 if (Subtarget->isThumb1Only()) 5094 return isLegalT1AddressImmediate(V, VT); 5095 else if (Subtarget->isThumb2()) 5096 return isLegalT2AddressImmediate(V, VT, Subtarget); 5097 5098 // ARM mode. 5099 if (V < 0) 5100 V = - V; 5101 switch (VT.getSimpleVT().SimpleTy) { 5102 default: return false; 5103 case MVT::i1: 5104 case MVT::i8: 5105 case MVT::i32: 5106 // +- imm12 5107 return V == (V & ((1LL << 12) - 1)); 5108 case MVT::i16: 5109 // +- imm8 5110 return V == (V & ((1LL << 8) - 1)); 5111 case MVT::f32: 5112 case MVT::f64: 5113 if (!Subtarget->hasVFP2()) // FIXME: NEON? 5114 return false; 5115 if ((V & 3) != 0) 5116 return false; 5117 V >>= 2; 5118 return V == (V & ((1LL << 8) - 1)); 5119 } 5120} 5121 5122bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 5123 EVT VT) const { 5124 int Scale = AM.Scale; 5125 if (Scale < 0) 5126 return false; 5127 5128 switch (VT.getSimpleVT().SimpleTy) { 5129 default: return false; 5130 case MVT::i1: 5131 case MVT::i8: 5132 case MVT::i16: 5133 case MVT::i32: 5134 if (Scale == 1) 5135 return true; 5136 // r + r << imm 5137 Scale = Scale & ~1; 5138 return Scale == 2 || Scale == 4 || Scale == 8; 5139 case MVT::i64: 5140 // r + r 5141 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5142 return true; 5143 return false; 5144 case MVT::isVoid: 5145 // Note, we allow "void" uses (basically, uses that aren't loads or 5146 // stores), because arm allows folding a scale into many arithmetic 5147 // operations. This should be made more precise and revisited later. 5148 5149 // Allow r << imm, but the imm has to be a multiple of two. 5150 if (Scale & 1) return false; 5151 return isPowerOf2_32(Scale); 5152 } 5153} 5154 5155/// isLegalAddressingMode - Return true if the addressing mode represented 5156/// by AM is legal for this target, for a load/store of the specified type. 
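/// For example, in ARM mode a base register plus a scaled index such as
/// [r1, r2, lsl #2] is legal, but no mode combines a scaled index with a
/// nonzero immediate offset.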
5157bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5158 const Type *Ty) const { 5159 EVT VT = getValueType(Ty, true); 5160 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 5161 return false; 5162 5163 // Can never fold addr of global into load/store. 5164 if (AM.BaseGV) 5165 return false; 5166 5167 switch (AM.Scale) { 5168 case 0: // no scale reg, must be "r+i" or "r", or "i". 5169 break; 5170 case 1: 5171 if (Subtarget->isThumb1Only()) 5172 return false; 5173 // FALL THROUGH. 5174 default: 5175 // ARM doesn't support any R+R*scale+imm addr modes. 5176 if (AM.BaseOffs) 5177 return false; 5178 5179 if (!VT.isSimple()) 5180 return false; 5181 5182 if (Subtarget->isThumb2()) 5183 return isLegalT2ScaledAddressingMode(AM, VT); 5184 5185 int Scale = AM.Scale; 5186 switch (VT.getSimpleVT().SimpleTy) { 5187 default: return false; 5188 case MVT::i1: 5189 case MVT::i8: 5190 case MVT::i32: 5191 if (Scale < 0) Scale = -Scale; 5192 if (Scale == 1) 5193 return true; 5194 // r + r << imm 5195 return isPowerOf2_32(Scale & ~1); 5196 case MVT::i16: 5197 case MVT::i64: 5198 // r + r 5199 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 5200 return true; 5201 return false; 5202 5203 case MVT::isVoid: 5204 // Note, we allow "void" uses (basically, uses that aren't loads or 5205 // stores), because arm allows folding a scale into many arithmetic 5206 // operations. This should be made more precise and revisited later. 5207 5208 // Allow r << imm, but the imm has to be a multiple of two. 5209 if (Scale & 1) return false; 5210 return isPowerOf2_32(Scale); 5211 } 5212 break; 5213 } 5214 return true; 5215} 5216 5217/// isLegalICmpImmediate - Return true if the specified immediate is legal 5218/// icmp immediate, that is the target has icmp instructions which can compare 5219/// a register against the immediate without having to materialize the 5220/// immediate into a register. 
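/// For example, ARM mode accepts any 8-bit value rotated right by an even
/// amount (so cmp r0, #0xff000000 is legal), while Thumb1 only accepts
/// immediates in the range [0, 255].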
5221bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 5222 if (!Subtarget->isThumb()) 5223 return ARM_AM::getSOImmVal(Imm) != -1; 5224 if (Subtarget->isThumb2()) 5225 return ARM_AM::getT2SOImmVal(Imm) != -1; 5226 return Imm >= 0 && Imm <= 255; 5227} 5228 5229static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 5230 bool isSEXTLoad, SDValue &Base, 5231 SDValue &Offset, bool &isInc, 5232 SelectionDAG &DAG) { 5233 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5234 return false; 5235 5236 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 5237 // AddressingMode 3 5238 Base = Ptr->getOperand(0); 5239 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5240 int RHSC = (int)RHS->getZExtValue(); 5241 if (RHSC < 0 && RHSC > -256) { 5242 assert(Ptr->getOpcode() == ISD::ADD); 5243 isInc = false; 5244 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5245 return true; 5246 } 5247 } 5248 isInc = (Ptr->getOpcode() == ISD::ADD); 5249 Offset = Ptr->getOperand(1); 5250 return true; 5251 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 5252 // AddressingMode 2 5253 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5254 int RHSC = (int)RHS->getZExtValue(); 5255 if (RHSC < 0 && RHSC > -0x1000) { 5256 assert(Ptr->getOpcode() == ISD::ADD); 5257 isInc = false; 5258 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5259 Base = Ptr->getOperand(0); 5260 return true; 5261 } 5262 } 5263 5264 if (Ptr->getOpcode() == ISD::ADD) { 5265 isInc = true; 5266 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 5267 if (ShOpcVal != ARM_AM::no_shift) { 5268 Base = Ptr->getOperand(1); 5269 Offset = Ptr->getOperand(0); 5270 } else { 5271 Base = Ptr->getOperand(0); 5272 Offset = Ptr->getOperand(1); 5273 } 5274 return true; 5275 } 5276 5277 isInc = (Ptr->getOpcode() == ISD::ADD); 5278 Base = Ptr->getOperand(0); 5279 Offset = Ptr->getOperand(1); 5280 return true; 5281 } 5282 5283 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 5284 return false; 5285} 5286 5287static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 5288 bool isSEXTLoad, SDValue &Base, 5289 SDValue &Offset, bool &isInc, 5290 SelectionDAG &DAG) { 5291 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 5292 return false; 5293 5294 Base = Ptr->getOperand(0); 5295 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 5296 int RHSC = (int)RHS->getZExtValue(); 5297 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 5298 assert(Ptr->getOpcode() == ISD::ADD); 5299 isInc = false; 5300 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 5301 return true; 5302 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 5303 isInc = Ptr->getOpcode() == ISD::ADD; 5304 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 5305 return true; 5306 } 5307 } 5308 5309 return false; 5310} 5311 5312/// getPreIndexedAddressParts - returns true by value, base pointer and 5313/// offset pointer and addressing mode by reference if the node's address 5314/// can be legally represented as pre-indexed load / store address. 
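/// For example, a load whose address is (add r1, #4) can become
/// ldr r0, [r1, #4]!, updating r1 as a side effect.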
5315bool 5316ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 5317 SDValue &Offset, 5318 ISD::MemIndexedMode &AM, 5319 SelectionDAG &DAG) const { 5320 if (Subtarget->isThumb1Only()) 5321 return false; 5322 5323 EVT VT; 5324 SDValue Ptr; 5325 bool isSEXTLoad = false; 5326 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5327 Ptr = LD->getBasePtr(); 5328 VT = LD->getMemoryVT(); 5329 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5330 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5331 Ptr = ST->getBasePtr(); 5332 VT = ST->getMemoryVT(); 5333 } else 5334 return false; 5335 5336 bool isInc; 5337 bool isLegal = false; 5338 if (Subtarget->isThumb2()) 5339 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5340 Offset, isInc, DAG); 5341 else 5342 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 5343 Offset, isInc, DAG); 5344 if (!isLegal) 5345 return false; 5346 5347 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 5348 return true; 5349} 5350 5351/// getPostIndexedAddressParts - returns true by value, base pointer and 5352/// offset pointer and addressing mode by reference if this node can be 5353/// combined with a load / store to form a post-indexed load / store. 5354bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 5355 SDValue &Base, 5356 SDValue &Offset, 5357 ISD::MemIndexedMode &AM, 5358 SelectionDAG &DAG) const { 5359 if (Subtarget->isThumb1Only()) 5360 return false; 5361 5362 EVT VT; 5363 SDValue Ptr; 5364 bool isSEXTLoad = false; 5365 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 5366 VT = LD->getMemoryVT(); 5367 Ptr = LD->getBasePtr(); 5368 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 5369 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 5370 VT = ST->getMemoryVT(); 5371 Ptr = ST->getBasePtr(); 5372 } else 5373 return false; 5374 5375 bool isInc; 5376 bool isLegal = false; 5377 if (Subtarget->isThumb2()) 5378 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5379 isInc, DAG); 5380 else 5381 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 5382 isInc, DAG); 5383 if (!isLegal) 5384 return false; 5385 5386 if (Ptr != Base) { 5387 // Swap base ptr and offset to catch more post-index load / store when 5388 // it's legal. In Thumb2 mode, offset must be an immediate. 5389 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 5390 !Subtarget->isThumb2()) 5391 std::swap(Base, Offset); 5392 5393 // Post-indexed load / store update the base pointer. 5394 if (Ptr != Base) 5395 return false; 5396 } 5397 5398 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 5399 return true; 5400} 5401 5402void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5403 const APInt &Mask, 5404 APInt &KnownZero, 5405 APInt &KnownOne, 5406 const SelectionDAG &DAG, 5407 unsigned Depth) const { 5408 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5409 switch (Op.getOpcode()) { 5410 default: break; 5411 case ARMISD::CMOV: { 5412 // Bits are known zero/one if known on the LHS and RHS. 
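    // A CMOV yields one of its first two operands, so a bit is known in the
    // result only if it is known, with the same value, in both of them.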
5413 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 5414 if (KnownZero == 0 && KnownOne == 0) return; 5415 5416 APInt KnownZeroRHS, KnownOneRHS; 5417 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 5418 KnownZeroRHS, KnownOneRHS, Depth+1); 5419 KnownZero &= KnownZeroRHS; 5420 KnownOne &= KnownOneRHS; 5421 return; 5422 } 5423 } 5424} 5425 5426//===----------------------------------------------------------------------===// 5427// ARM Inline Assembly Support 5428//===----------------------------------------------------------------------===// 5429 5430/// getConstraintType - Given a constraint letter, return the type of 5431/// constraint it is for this target. 5432ARMTargetLowering::ConstraintType 5433ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 5434 if (Constraint.size() == 1) { 5435 switch (Constraint[0]) { 5436 default: break; 5437 case 'l': return C_RegisterClass; 5438 case 'w': return C_RegisterClass; 5439 } 5440 } 5441 return TargetLowering::getConstraintType(Constraint); 5442} 5443 5444/// Examine constraint type and operand type and determine a weight value. 5445/// This object must already have been set up with the operand type 5446/// and the current alternative constraint selected. 5447TargetLowering::ConstraintWeight 5448ARMTargetLowering::getSingleConstraintMatchWeight( 5449 AsmOperandInfo &info, const char *constraint) const { 5450 ConstraintWeight weight = CW_Invalid; 5451 Value *CallOperandVal = info.CallOperandVal; 5452 // If we don't have a value, we can't do a match, 5453 // but allow it at the lowest weight. 5454 if (CallOperandVal == NULL) 5455 return CW_Default; 5456 const Type *type = CallOperandVal->getType(); 5457 // Look at the constraint type. 5458 switch (*constraint) { 5459 default: 5460 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 5461 break; 5462 case 'l': 5463 if (type->isIntegerTy()) { 5464 if (Subtarget->isThumb()) 5465 weight = CW_SpecificReg; 5466 else 5467 weight = CW_Register; 5468 } 5469 break; 5470 case 'w': 5471 if (type->isFloatingPointTy()) 5472 weight = CW_Register; 5473 break; 5474 } 5475 return weight; 5476} 5477 5478std::pair<unsigned, const TargetRegisterClass*> 5479ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5480 EVT VT) const { 5481 if (Constraint.size() == 1) { 5482 // GCC ARM Constraint Letters 5483 switch (Constraint[0]) { 5484 case 'l': 5485 if (Subtarget->isThumb()) 5486 return std::make_pair(0U, ARM::tGPRRegisterClass); 5487 else 5488 return std::make_pair(0U, ARM::GPRRegisterClass); 5489 case 'r': 5490 return std::make_pair(0U, ARM::GPRRegisterClass); 5491 case 'w': 5492 if (VT == MVT::f32) 5493 return std::make_pair(0U, ARM::SPRRegisterClass); 5494 if (VT.getSizeInBits() == 64) 5495 return std::make_pair(0U, ARM::DPRRegisterClass); 5496 if (VT.getSizeInBits() == 128) 5497 return std::make_pair(0U, ARM::QPRRegisterClass); 5498 break; 5499 } 5500 } 5501 if (StringRef("{cc}").equals_lower(Constraint)) 5502 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 5503 5504 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5505} 5506 5507std::vector<unsigned> ARMTargetLowering:: 5508getRegClassForInlineAsmConstraint(const std::string &Constraint, 5509 EVT VT) const { 5510 if (Constraint.size() != 1) 5511 return std::vector<unsigned>(); 5512 5513 switch (Constraint[0]) { // GCC ARM Constraint Letters 5514 default: break; 5515 case 'l': 5516 return make_vector<unsigned>(ARM::R0, ARM::R1, 
ARM::R2, ARM::R3, 5517 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5518 0); 5519 case 'r': 5520 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 5521 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 5522 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 5523 ARM::R12, ARM::LR, 0); 5524 case 'w': 5525 if (VT == MVT::f32) 5526 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 5527 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 5528 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 5529 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 5530 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 5531 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 5532 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 5533 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 5534 if (VT.getSizeInBits() == 64) 5535 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 5536 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 5537 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 5538 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 5539 if (VT.getSizeInBits() == 128) 5540 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 5541 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 5542 break; 5543 } 5544 5545 return std::vector<unsigned>(); 5546} 5547 5548/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5549/// vector. If it is invalid, don't add anything to Ops. 5550void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5551 char Constraint, 5552 std::vector<SDValue>&Ops, 5553 SelectionDAG &DAG) const { 5554 SDValue Result(0, 0); 5555 5556 switch (Constraint) { 5557 default: break; 5558 case 'I': case 'J': case 'K': case 'L': 5559 case 'M': case 'N': case 'O': 5560 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 5561 if (!C) 5562 return; 5563 5564 int64_t CVal64 = C->getSExtValue(); 5565 int CVal = (int) CVal64; 5566 // None of these constraints allow values larger than 32 bits. Check 5567 // that the value fits in an int. 5568 if (CVal != CVal64) 5569 return; 5570 5571 switch (Constraint) { 5572 case 'I': 5573 if (Subtarget->isThumb1Only()) { 5574 // This must be a constant between 0 and 255, for ADD 5575 // immediates. 5576 if (CVal >= 0 && CVal <= 255) 5577 break; 5578 } else if (Subtarget->isThumb2()) { 5579 // A constant that can be used as an immediate value in a 5580 // data-processing instruction. 5581 if (ARM_AM::getT2SOImmVal(CVal) != -1) 5582 break; 5583 } else { 5584 // A constant that can be used as an immediate value in a 5585 // data-processing instruction. 5586 if (ARM_AM::getSOImmVal(CVal) != -1) 5587 break; 5588 } 5589 return; 5590 5591 case 'J': 5592 if (Subtarget->isThumb()) { // FIXME thumb2 5593 // This must be a constant between -255 and -1, for negated ADD 5594 // immediates. This can be used in GCC with an "n" modifier that 5595 // prints the negated value, for use with SUB instructions. It is 5596 // not useful otherwise but is implemented for compatibility. 5597 if (CVal >= -255 && CVal <= -1) 5598 break; 5599 } else { 5600 // This must be a constant between -4095 and 4095. It is not clear 5601 // what this constraint is intended for. Implemented for 5602 // compatibility with GCC. 5603 if (CVal >= -4095 && CVal <= 4095) 5604 break; 5605 } 5606 return; 5607 5608 case 'K': 5609 if (Subtarget->isThumb1Only()) { 5610 // A 32-bit value where only one byte has a nonzero value. Exclude 5611 // zero to match GCC. This constraint is used by GCC internally for 5612 // constants that can be loaded with a move/shift combination. 5613 // It is not useful otherwise but is implemented for compatibility. 
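          // For example, 0x0000ff00 (0xff << 8) qualifies: it can be built
          // with movs rN, #0xff followed by lsls rN, rN, #8.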
5614 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 5615 break; 5616 } else if (Subtarget->isThumb2()) { 5617 // A constant whose bitwise inverse can be used as an immediate 5618 // value in a data-processing instruction. This can be used in GCC 5619 // with a "B" modifier that prints the inverted value, for use with 5620 // BIC and MVN instructions. It is not useful otherwise but is 5621 // implemented for compatibility. 5622 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 5623 break; 5624 } else { 5625 // A constant whose bitwise inverse can be used as an immediate 5626 // value in a data-processing instruction. This can be used in GCC 5627 // with a "B" modifier that prints the inverted value, for use with 5628 // BIC and MVN instructions. It is not useful otherwise but is 5629 // implemented for compatibility. 5630 if (ARM_AM::getSOImmVal(~CVal) != -1) 5631 break; 5632 } 5633 return; 5634 5635 case 'L': 5636 if (Subtarget->isThumb1Only()) { 5637 // This must be a constant between -7 and 7, 5638 // for 3-operand ADD/SUB immediate instructions. 5639 if (CVal >= -7 && CVal < 7) 5640 break; 5641 } else if (Subtarget->isThumb2()) { 5642 // A constant whose negation can be used as an immediate value in a 5643 // data-processing instruction. This can be used in GCC with an "n" 5644 // modifier that prints the negated value, for use with SUB 5645 // instructions. It is not useful otherwise but is implemented for 5646 // compatibility. 5647 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 5648 break; 5649 } else { 5650 // A constant whose negation can be used as an immediate value in a 5651 // data-processing instruction. This can be used in GCC with an "n" 5652 // modifier that prints the negated value, for use with SUB 5653 // instructions. It is not useful otherwise but is implemented for 5654 // compatibility. 5655 if (ARM_AM::getSOImmVal(-CVal) != -1) 5656 break; 5657 } 5658 return; 5659 5660 case 'M': 5661 if (Subtarget->isThumb()) { // FIXME thumb2 5662 // This must be a multiple of 4 between 0 and 1020, for 5663 // ADD sp + immediate. 5664 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 5665 break; 5666 } else { 5667 // A power of two or a constant between 0 and 32. This is used in 5668 // GCC for the shift amount on shifted register operands, but it is 5669 // useful in general for any shift amounts. 5670 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 5671 break; 5672 } 5673 return; 5674 5675 case 'N': 5676 if (Subtarget->isThumb()) { // FIXME thumb2 5677 // This must be a constant between 0 and 31, for shift amounts. 5678 if (CVal >= 0 && CVal <= 31) 5679 break; 5680 } 5681 return; 5682 5683 case 'O': 5684 if (Subtarget->isThumb()) { // FIXME thumb2 5685 // This must be a multiple of 4 between -508 and 508, for 5686 // ADD/SUB sp = sp + immediate. 5687 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 5688 break; 5689 } 5690 return; 5691 } 5692 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 5693 break; 5694 } 5695 5696 if (Result.getNode()) { 5697 Ops.push_back(Result); 5698 return; 5699 } 5700 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 5701} 5702 5703bool 5704ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 5705 // The ARM target isn't yet aware of offsets. 
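  // A symbol-plus-constant address is therefore always formed with a
  // separate add rather than folded into the symbol reference.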
5706 return false; 5707} 5708 5709int ARM::getVFPf32Imm(const APFloat &FPImm) { 5710 APInt Imm = FPImm.bitcastToAPInt(); 5711 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; 5712 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 5713 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits 5714 5715 // We can handle 4 bits of mantissa. 5716 // mantissa = (16+UInt(e:f:g:h))/16. 5717 if (Mantissa & 0x7ffff) 5718 return -1; 5719 Mantissa >>= 19; 5720 if ((Mantissa & 0xf) != Mantissa) 5721 return -1; 5722 5723 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 5724 if (Exp < -3 || Exp > 4) 5725 return -1; 5726 Exp = ((Exp+3) & 0x7) ^ 4; 5727 5728 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 5729} 5730 5731int ARM::getVFPf64Imm(const APFloat &FPImm) { 5732 APInt Imm = FPImm.bitcastToAPInt(); 5733 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; 5734 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 5735 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL; 5736 5737 // We can handle 4 bits of mantissa. 5738 // mantissa = (16+UInt(e:f:g:h))/16. 5739 if (Mantissa & 0xffffffffffffLL) 5740 return -1; 5741 Mantissa >>= 48; 5742 if ((Mantissa & 0xf) != Mantissa) 5743 return -1; 5744 5745 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 5746 if (Exp < -3 || Exp > 4) 5747 return -1; 5748 Exp = ((Exp+3) & 0x7) ^ 4; 5749 5750 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 5751} 5752 5753bool ARM::isBitFieldInvertedMask(unsigned v) { 5754 if (v == 0xffffffff) 5755 return 0; 5756 // there can be 1's on either or both "outsides", all the "inside" 5757 // bits must be 0's 5758 unsigned int lsb = 0, msb = 31; 5759 while (v & (1 << msb)) --msb; 5760 while (v & (1 << lsb)) ++lsb; 5761 for (unsigned int i = lsb; i <= msb; ++i) { 5762 if (v & (1 << i)) 5763 return 0; 5764 } 5765 return 1; 5766} 5767 5768/// isFPImmLegal - Returns true if the target can instruction select the 5769/// specified FP immediate natively. If false, the legalizer will 5770/// materialize the FP immediate as a load from a constant pool. 5771bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 5772 if (!Subtarget->hasVFP3()) 5773 return false; 5774 if (VT == MVT::f32) 5775 return ARM::getVFPf32Imm(Imm) != -1; 5776 if (VT == MVT::f64) 5777 return ARM::getVFPf64Imm(Imm) != -1; 5778 return false; 5779} 5780 5781/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 5782/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 5783/// specified in the intrinsic calls. 5784bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 5785 const CallInst &I, 5786 unsigned Intrinsic) const { 5787 switch (Intrinsic) { 5788 case Intrinsic::arm_neon_vld1: 5789 case Intrinsic::arm_neon_vld2: 5790 case Intrinsic::arm_neon_vld3: 5791 case Intrinsic::arm_neon_vld4: 5792 case Intrinsic::arm_neon_vld2lane: 5793 case Intrinsic::arm_neon_vld3lane: 5794 case Intrinsic::arm_neon_vld4lane: { 5795 Info.opc = ISD::INTRINSIC_W_CHAIN; 5796 // Conservatively set memVT to the entire set of vectors loaded. 
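    // For example (illustrative), a vld3 returning three v4i32 vectors
    // loads 48 bytes, so memVT becomes v6i64 here.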
5797 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 5798 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 5799 Info.ptrVal = I.getArgOperand(0); 5800 Info.offset = 0; 5801 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 5802 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 5803 Info.vol = false; // volatile loads with NEON intrinsics not supported 5804 Info.readMem = true; 5805 Info.writeMem = false; 5806 return true; 5807 } 5808 case Intrinsic::arm_neon_vst1: 5809 case Intrinsic::arm_neon_vst2: 5810 case Intrinsic::arm_neon_vst3: 5811 case Intrinsic::arm_neon_vst4: 5812 case Intrinsic::arm_neon_vst2lane: 5813 case Intrinsic::arm_neon_vst3lane: 5814 case Intrinsic::arm_neon_vst4lane: { 5815 Info.opc = ISD::INTRINSIC_VOID; 5816 // Conservatively set memVT to the entire set of vectors stored. 5817 unsigned NumElts = 0; 5818 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 5819 const Type *ArgTy = I.getArgOperand(ArgI)->getType(); 5820 if (!ArgTy->isVectorTy()) 5821 break; 5822 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 5823 } 5824 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 5825 Info.ptrVal = I.getArgOperand(0); 5826 Info.offset = 0; 5827 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 5828 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 5829 Info.vol = false; // volatile stores with NEON intrinsics not supported 5830 Info.readMem = false; 5831 Info.writeMem = true; 5832 return true; 5833 } 5834 default: 5835 break; 5836 } 5837 5838 return false; 5839} 5840