ARMISelLowering.cpp revision f05b1dcf870346094f8aaee8e387c92d3e47e98d
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
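
// Note: each cl::opt below surfaces as a (hidden) llc command-line flag.
// Illustrative invocation (not from this file):
//   llc -mtriple=armv7-linux-gnueabi -arm-long-calls input.ll
// would force every call to be generated via an indirect call instruction.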

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool>
UseDivMod("arm-divmod-libcall", cl::Hidden,
  cl::desc("Use __{u}divmod libcalls for div / rem pairs"),
  cl::init(false));

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT.getSimpleVT(),
                          (MVT::SimpleValueType)InnerVT, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }
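
  // Orientation note (summary of the generic TargetLowering semantics used
  // throughout this function): Legal means the operation is natively
  // supported for this type; Promote means it is implemented on another
  // type (see the AddPromotedToType calls above); Expand lets the legalizer
  // break it into simpler operations or a libcall; Custom means this target
  // lowers the node itself in LowerOperation.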

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
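
      // How to read the setCmpLibcallCC entries above (explanatory note):
      // the condition code is used to compare the libcall's integer result
      // against zero. E.g. __eqdf2vfp returns nonzero when its operands are
      // equal, so an OEQ test becomes "result != 0" (SETNE); O_F64 reuses
      // __unorddf2vfp and tests "result == 0" (SETEQ), i.e. "not unordered".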

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
"__aeabi_fdiv"); 291 setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul"); 292 setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub"); 293 setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS); 294 setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS); 295 setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS); 296 setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS); 297 298 // Single-precision floating-point comparison helper functions 299 // RTABI chapter 4.1.2, Table 5 300 setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq"); 301 setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); 302 setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq"); 303 setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ); 304 setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt"); 305 setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE); 306 setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple"); 307 setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE); 308 setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge"); 309 setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE); 310 setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt"); 311 setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE); 312 setLibcallName(RTLIB::UO_F32, "__aeabi_fcmpun"); 313 setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE); 314 setLibcallName(RTLIB::O_F32, "__aeabi_fcmpun"); 315 setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ); 316 setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS); 317 setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS); 318 setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS); 319 setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS); 320 setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS); 321 setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS); 322 setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS); 323 setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS); 324 325 // Floating-point to integer conversions. 326 // RTABI chapter 4.1.2, Table 6 327 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz"); 328 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz"); 329 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz"); 330 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz"); 331 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz"); 332 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz"); 333 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz"); 334 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz"); 335 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS); 336 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS); 337 setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS); 338 setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS); 339 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS); 340 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS); 341 setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS); 342 setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS); 343 344 // Conversions between floating types. 345 // RTABI chapter 4.1.2, Table 7 346 setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); 347 setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); 348 setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); 349 setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); 350 351 // Integer to floating-point conversions. 
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64,  "__aeabi_lmul");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
  }

  if (UseDivMod) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions.
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
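
  // Explanatory note: with the f32 extending load expanded, a "load float;
  // fpext to double" sequence cannot be selected as a single extending load;
  // it stays a plain f32 load followed by an FP_EXTEND (e.g. a VFP
  // vcvt.f64.f32 in the selected code).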

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) {
    // v7M has a hardware divider.
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION,        MVT::i32,   Expand);
  setOperationAction(ISD::EXCEPTIONADDR,      MVT::i32,   Expand);
  setExceptionPointerRegister(ARM::R0);
  setExceptionSelectorRegister(ARM::R1);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::Other, Expand);
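
  // A note on the barrier / atomic handling below (assumption about the
  // generic legalizer's behavior): expanding the ATOMIC_* nodes makes the
  // legalizer emit calls to the __sync_* runtime helpers (e.g.
  // __sync_fetch_and_add_4 for a 32-bit ATOMIC_LOAD_ADD); since those
  // helpers do their own locking, the explicit fences can be folded away.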
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8,  Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    // Since the libcalls include locking, fold in the fences.
    setShouldFoldAtomicFences(true);
  }
  // 64-bit versions are always libcalls (for now).
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }
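
  // Illustrative example (not from this file): with VFP2 the IR
  //   %i = bitcast double %d to i64
  // lowers to a single ARMISD::VMOVRRD, i.e. "vmov r0, r1, d0", moving the
  // D register straight into a GPR pair instead of bouncing through memory.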

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow.
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  // Various VFP goodness.
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
    setTargetDAGCombine(ISD::OR);
  if (Subtarget->hasNEON())
    setTargetDAGCombine(ISD::AND);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  benefitFromCodePlacementOpt = true;
}

// FIXME: It might make sense to define the representative register class as
// the nearest super-register that has a non-null superset. For example,
// DPR_VFP2 is a super-register of SPR, and DPR is a superset of DPR_VFP2.
// Consequently, SPR's representative would be DPR_VFP2. This should work
// well if register pressure tracking were modified such that a register use
// would increment the pressure of the register class's representative and
// all of its super classes' representatives transitively. We have not
// implemented this because of the difficulty prior to coalescing of modeling
// operand register classes due to the common occurrence of cross class
// copies and subregister insertions and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = ARM::DPRRegisterClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be
    // constrained to the VFP2 class (D0-D15). We currently model this
    // constraint prior to coalescing by double-counting the SP regs. See the
    // FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = ARM::DPRRegisterClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = ARM::DPRRegisterClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN:    return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";
"ARMISD::RRX"; 806 807 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; 808 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; 809 810 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; 811 case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP"; 812 case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP"; 813 814 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; 815 816 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER"; 817 818 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; 819 820 case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER"; 821 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; 822 823 case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; 824 825 case ARMISD::VCEQ: return "ARMISD::VCEQ"; 826 case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; 827 case ARMISD::VCGE: return "ARMISD::VCGE"; 828 case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; 829 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 830 case ARMISD::VCGEU: return "ARMISD::VCGEU"; 831 case ARMISD::VCGT: return "ARMISD::VCGT"; 832 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 833 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 834 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 835 case ARMISD::VTST: return "ARMISD::VTST"; 836 837 case ARMISD::VSHL: return "ARMISD::VSHL"; 838 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 839 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 840 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 841 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 842 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 843 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 844 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 845 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 846 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 847 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 848 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 849 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 850 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 851 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 852 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 853 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 854 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 855 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 856 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 857 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 858 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 859 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 860 case ARMISD::VDUP: return "ARMISD::VDUP"; 861 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 862 case ARMISD::VEXT: return "ARMISD::VEXT"; 863 case ARMISD::VREV64: return "ARMISD::VREV64"; 864 case ARMISD::VREV32: return "ARMISD::VREV32"; 865 case ARMISD::VREV16: return "ARMISD::VREV16"; 866 case ARMISD::VZIP: return "ARMISD::VZIP"; 867 case ARMISD::VUZP: return "ARMISD::VUZP"; 868 case ARMISD::VTRN: return "ARMISD::VTRN"; 869 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 870 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 871 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 872 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 873 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 874 case ARMISD::FMAX: return "ARMISD::FMAX"; 875 case ARMISD::FMIN: return "ARMISD::FMIN"; 876 case ARMISD::BFI: return "ARMISD::BFI"; 877 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 878 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 879 case ARMISD::VBSL: return "ARMISD::VBSL"; 880 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 881 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 882 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 
  case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  }
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
  return ARM::createFastISel(funcInfo);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());

  if (TID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
    return Sched::Latency;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             FloatABIType == FloatABI::Hard && !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
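
// Worked example for the needsCustom() path above (explanatory, AAPCS
// soft-float case): an f64 result comes back in the GPR pair r0/r1; the two
// i32 halves are read with CopyFromReg and reassembled into an f64 with
// ARMISD::VMOVDRR, and a v2f64 result repeats this once per f64 lane.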

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to address "Dst" of size "Size". Alignment information
/// is specified by the specific parameter attribute. The copy will be passed
/// as a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part
/// that does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool IsSibCall = false;
  // Temporarily disable tail calls so things don't break.
  if (!EnableARMTailCalls)
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      IsSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (IsSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (!IsSibCall || isByVal) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());
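
  // Explanatory note: TokenFactor merges independent chains, so the argument
  // stores collected in MemOpChains may be scheduled in any order relative
  // to one another, while everything chained after the TokenFactor still
  // depends on all of them.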
1312 SDValue InFlag; 1313 // Tail call byval lowering might overwrite argument registers so in case of 1314 // tail call optimization the copies to registers are lowered later. 1315 if (!isTailCall) 1316 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1317 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1318 RegsToPass[i].second, InFlag); 1319 InFlag = Chain.getValue(1); 1320 } 1321 1322 // For tail calls lower the arguments to the 'real' stack slot. 1323 if (isTailCall) { 1324 // Force all the incoming stack arguments to be loaded from the stack 1325 // before any new outgoing arguments are stored to the stack, because the 1326 // outgoing stack slots may alias the incoming argument stack slots, and 1327 // the alias isn't otherwise explicit. This is slightly more conservative 1328 // than necessary, because it means that each store effectively depends 1329 // on every argument instead of just those arguments it would clobber. 1330 1331 // Do not glue the preceding copy-to-reg nodes together with the following ones. 1332 InFlag = SDValue(); 1333 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1334 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1335 RegsToPass[i].second, InFlag); 1336 InFlag = Chain.getValue(1); 1337 } 1338 InFlag = SDValue(); 1339 } 1340 1341 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1342 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1343 // node so that legalize doesn't hack it. 1344 bool isDirect = false; 1345 bool isARMFunc = false; 1346 bool isLocalARMFunc = false; 1347 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1348 1349 if (EnableARMLongCalls) { 1350 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1351 && "long-calls with non-static relocation model!"); 1352 // Handle a global address or an external symbol. If it's not one of 1353 // those, the target's already in a register, so we don't need to do 1354 // anything extra.
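    // Sketch of the intended effect: a direct call
    //   bl foo          ; limited to a +/-32MB branch range
    // becomes, roughly,
    //   ldr rX, .LCPIn  ; constant-pool slot holding the address of foo
    //   blx rX          ; register-indirect call, no range limit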
1355 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1356 const GlobalValue *GV = G->getGlobal(); 1357 // Create a constant pool entry for the callee address 1358 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1359 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1360 ARMPCLabelIndex, 1361 ARMCP::CPValue, 0); 1362 // Get the address of the callee into a register 1363 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1364 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1365 Callee = DAG.getLoad(getPointerTy(), dl, 1366 DAG.getEntryNode(), CPAddr, 1367 MachinePointerInfo::getConstantPool(), 1368 false, false, 0); 1369 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1370 const char *Sym = S->getSymbol(); 1371 1372 // Create a constant pool entry for the callee address 1373 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1374 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1375 Sym, ARMPCLabelIndex, 0); 1376 // Get the address of the callee into a register 1377 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1378 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1379 Callee = DAG.getLoad(getPointerTy(), dl, 1380 DAG.getEntryNode(), CPAddr, 1381 MachinePointerInfo::getConstantPool(), 1382 false, false, 0); 1383 } 1384 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1385 const GlobalValue *GV = G->getGlobal(); 1386 isDirect = true; 1387 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1388 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1389 getTargetMachine().getRelocationModel() != Reloc::Static; 1390 isARMFunc = !Subtarget->isThumb() || isStub; 1391 // ARM call to a local ARM function is predicable. 1392 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1393 // tBX takes a register source operand. 1394 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1395 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1396 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, 1397 ARMPCLabelIndex, 1398 ARMCP::CPValue, 4); 1399 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1400 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1401 Callee = DAG.getLoad(getPointerTy(), dl, 1402 DAG.getEntryNode(), CPAddr, 1403 MachinePointerInfo::getConstantPool(), 1404 false, false, 0); 1405 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1406 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1407 getPointerTy(), Callee, PICLabel); 1408 } else { 1409 // On ELF targets for PIC code, direct calls should go through the PLT 1410 unsigned OpFlags = 0; 1411 if (Subtarget->isTargetELF() && 1412 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1413 OpFlags = ARMII::MO_PLT; 1414 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1415 } 1416 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1417 isDirect = true; 1418 bool isStub = Subtarget->isTargetDarwin() && 1419 getTargetMachine().getRelocationModel() != Reloc::Static; 1420 isARMFunc = !Subtarget->isThumb() || isStub; 1421 // tBX takes a register source operand. 
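    // (Pre-v5T Thumb has no BLX, so an interworking call must branch via a
    // register; the constant-pool load below gets the callee address into
    // one.)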
1422 const char *Sym = S->getSymbol(); 1423 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1424 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1425 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1426 Sym, ARMPCLabelIndex, 4); 1427 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1428 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1429 Callee = DAG.getLoad(getPointerTy(), dl, 1430 DAG.getEntryNode(), CPAddr, 1431 MachinePointerInfo::getConstantPool(), 1432 false, false, 0); 1433 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1434 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1435 getPointerTy(), Callee, PICLabel); 1436 } else { 1437 unsigned OpFlags = 0; 1438 // On ELF targets for PIC code, direct calls should go through the PLT 1439 if (Subtarget->isTargetELF() && 1440 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1441 OpFlags = ARMII::MO_PLT; 1442 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1443 } 1444 } 1445 1446 // FIXME: handle tail calls differently. 1447 unsigned CallOpc; 1448 if (Subtarget->isThumb()) { 1449 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1450 CallOpc = ARMISD::CALL_NOLINK; 1451 else 1452 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1453 } else { 1454 CallOpc = (isDirect || Subtarget->hasV5TOps()) 1455 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL) 1456 : ARMISD::CALL_NOLINK; 1457 } 1458 1459 std::vector<SDValue> Ops; 1460 Ops.push_back(Chain); 1461 Ops.push_back(Callee); 1462 1463 // Add argument registers to the end of the list so that they are known live 1464 // into the call. 1465 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1466 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1467 RegsToPass[i].second.getValueType())); 1468 1469 if (InFlag.getNode()) 1470 Ops.push_back(InFlag); 1471 1472 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1473 if (isTailCall) 1474 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1475 1476 // Returns a chain and a flag for retval copy to use. 1477 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 1478 InFlag = Chain.getValue(1); 1479 1480 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1481 DAG.getIntPtrConstant(0, true), InFlag); 1482 if (!Ins.empty()) 1483 InFlag = Chain.getValue(1); 1484 1485 // Handle result values, copying them out of physregs into vregs that we 1486 // return. 1487 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, 1488 dl, DAG, InVals); 1489} 1490 1491/// HandleByVal - Every parameter *after* a byval parameter is passed 1492/// on the stack. Confiscate all the parameter registers to ensure 1493/// this. 1494void 1495llvm::ARMTargetLowering::HandleByVal(CCState *State) const { 1496 static const unsigned RegList1[] = { 1497 ARM::R0, ARM::R1, ARM::R2, ARM::R3 1498 }; 1499 do {} while (State->AllocateReg(RegList1, 4)); 1500} 1501 1502/// MatchingStackOffset - Return true if the given stack call argument is 1503/// already available in the same position (relatively) in the caller's 1504/// incoming argument stack.
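/// For instance (hypothetical IR), if f(i32 %a, i32 %b, i32 %c, i32 %d,
/// i32 %e) tail-calls g with the same five arguments unchanged, %e is
/// already at the correct fixed stack offset in the caller's frame and
/// needs no store.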
1505static 1506bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 1507 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 1508 const ARMInstrInfo *TII) { 1509 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 1510 int FI = INT_MAX; 1511 if (Arg.getOpcode() == ISD::CopyFromReg) { 1512 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 1513 if (!TargetRegisterInfo::isVirtualRegister(VR)) 1514 return false; 1515 MachineInstr *Def = MRI->getVRegDef(VR); 1516 if (!Def) 1517 return false; 1518 if (!Flags.isByVal()) { 1519 if (!TII->isLoadFromStackSlot(Def, FI)) 1520 return false; 1521 } else { 1522 return false; 1523 } 1524 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 1525 if (Flags.isByVal()) 1526 // ByVal argument is passed in as a pointer but it's now being 1527 // dereferenced. e.g. 1528 // define @foo(%struct.X* %A) { 1529 // tail call @bar(%struct.X* byval %A) 1530 // } 1531 return false; 1532 SDValue Ptr = Ld->getBasePtr(); 1533 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 1534 if (!FINode) 1535 return false; 1536 FI = FINode->getIndex(); 1537 } else 1538 return false; 1539 1540 assert(FI != INT_MAX); 1541 if (!MFI->isFixedObjectIndex(FI)) 1542 return false; 1543 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 1544} 1545 1546/// IsEligibleForTailCallOptimization - Check whether the call is eligible 1547/// for tail call optimization. Targets which want to do tail call 1548/// optimization should implement this function. 1549bool 1550ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1551 CallingConv::ID CalleeCC, 1552 bool isVarArg, 1553 bool isCalleeStructRet, 1554 bool isCallerStructRet, 1555 const SmallVectorImpl<ISD::OutputArg> &Outs, 1556 const SmallVectorImpl<SDValue> &OutVals, 1557 const SmallVectorImpl<ISD::InputArg> &Ins, 1558 SelectionDAG& DAG) const { 1559 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1560 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1561 bool CCMatch = CallerCC == CalleeCC; 1562 1563 // Look for obvious safe cases to perform tail call optimization that do not 1564 // require ABI changes. This is what gcc calls sibcall. 1565 1566 // Do not sibcall optimize vararg calls unless the call site passes no 1567 // arguments. 1568 if (isVarArg && !Outs.empty()) 1569 return false; 1570 1571 // Also avoid sibcall optimization if either caller or callee uses struct 1572 // return semantics. 1573 if (isCalleeStructRet || isCallerStructRet) 1574 return false; 1575 1576 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1577 // emitEpilogue is not ready for them. 1578 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1579 // LR. This means if we need to reload LR, it takes an extra instruction, 1580 // which outweighs the value of the tail call; but here we don't know yet 1581 // whether LR is going to be used. Probably the right approach is to 1582 // generate the tail call here and turn it back into CALL/RET in 1583 // emitEpilogue if LR is used. 1584 1585 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1586 // but we need to make sure there are enough registers; the only valid 1587 // registers are the 4 used for parameters. We don't currently do this 1588 // case.
1589 if (Subtarget->isThumb1Only()) 1590 return false; 1591 1592 // If the calling conventions do not match, then we'd better make sure the 1593 // results are returned in the same way as what the caller expects. 1594 if (!CCMatch) { 1595 SmallVector<CCValAssign, 16> RVLocs1; 1596 CCState CCInfo1(CalleeCC, false, getTargetMachine(), 1597 RVLocs1, *DAG.getContext()); 1598 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1599 1600 SmallVector<CCValAssign, 16> RVLocs2; 1601 CCState CCInfo2(CallerCC, false, getTargetMachine(), 1602 RVLocs2, *DAG.getContext()); 1603 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1604 1605 if (RVLocs1.size() != RVLocs2.size()) 1606 return false; 1607 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1608 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1609 return false; 1610 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1611 return false; 1612 if (RVLocs1[i].isRegLoc()) { 1613 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1614 return false; 1615 } else { 1616 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1617 return false; 1618 } 1619 } 1620 } 1621 1622 // If the callee takes no arguments then go on to check the results of the 1623 // call. 1624 if (!Outs.empty()) { 1625 // Check if stack adjustment is needed. For now, do not do this if any 1626 // argument is passed on the stack. 1627 SmallVector<CCValAssign, 16> ArgLocs; 1628 CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(), 1629 ArgLocs, *DAG.getContext()); 1630 CCInfo.AnalyzeCallOperands(Outs, 1631 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1632 if (CCInfo.getNextStackOffset()) { 1633 MachineFunction &MF = DAG.getMachineFunction(); 1634 1635 // Check if the arguments are already laid out in the right way as 1636 // the caller's fixed stack objects. 1637 MachineFrameInfo *MFI = MF.getFrameInfo(); 1638 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1639 const ARMInstrInfo *TII = 1640 ((ARMTargetMachine&)getTargetMachine()).getInstrInfo(); 1641 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1642 i != e; 1643 ++i, ++realArgIdx) { 1644 CCValAssign &VA = ArgLocs[i]; 1645 EVT RegVT = VA.getLocVT(); 1646 SDValue Arg = OutVals[realArgIdx]; 1647 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1648 if (VA.getLocInfo() == CCValAssign::Indirect) 1649 return false; 1650 if (VA.needsCustom()) { 1651 // f64 and vector types are split into multiple registers or 1652 // register/stack-slot combinations. The types will not match 1653 // the registers; give up on memory f64 refs until we figure 1654 // out what to do about this. 1655 if (!VA.isRegLoc()) 1656 return false; 1657 if (!ArgLocs[++i].isRegLoc()) 1658 return false; 1659 if (RegVT == MVT::v2f64) { 1660 if (!ArgLocs[++i].isRegLoc()) 1661 return false; 1662 if (!ArgLocs[++i].isRegLoc()) 1663 return false; 1664 } 1665 } else if (!VA.isRegLoc()) { 1666 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1667 MFI, MRI, TII)) 1668 return false; 1669 } 1670 } 1671 } 1672 } 1673 1674 return true; 1675} 1676 1677SDValue 1678ARMTargetLowering::LowerReturn(SDValue Chain, 1679 CallingConv::ID CallConv, bool isVarArg, 1680 const SmallVectorImpl<ISD::OutputArg> &Outs, 1681 const SmallVectorImpl<SDValue> &OutVals, 1682 DebugLoc dl, SelectionDAG &DAG) const { 1683 1684 // CCValAssign - represent the assignment of the return value to a location. 1685 SmallVector<CCValAssign, 16> RVLocs; 1686 1687 // CCState - Info about the registers and stack slots. 
1688 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 1689 *DAG.getContext()); 1690 1691 // Analyze outgoing return values. 1692 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 1693 isVarArg)); 1694 1695 // If this is the first return lowered for this function, add 1696 // the regs to the liveout set for the function. 1697 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 1698 for (unsigned i = 0; i != RVLocs.size(); ++i) 1699 if (RVLocs[i].isRegLoc()) 1700 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 1701 } 1702 1703 SDValue Flag; 1704 1705 // Copy the result values into the output registers. 1706 for (unsigned i = 0, realRVLocIdx = 0; 1707 i != RVLocs.size(); 1708 ++i, ++realRVLocIdx) { 1709 CCValAssign &VA = RVLocs[i]; 1710 assert(VA.isRegLoc() && "Can only return in registers!"); 1711 1712 SDValue Arg = OutVals[realRVLocIdx]; 1713 1714 switch (VA.getLocInfo()) { 1715 default: llvm_unreachable("Unknown loc info!"); 1716 case CCValAssign::Full: break; 1717 case CCValAssign::BCvt: 1718 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1719 break; 1720 } 1721 1722 if (VA.needsCustom()) { 1723 if (VA.getLocVT() == MVT::v2f64) { 1724 // Extract the first half and return it in two registers. 1725 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1726 DAG.getConstant(0, MVT::i32)); 1727 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 1728 DAG.getVTList(MVT::i32, MVT::i32), Half); 1729 1730 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 1731 Flag = Chain.getValue(1); 1732 VA = RVLocs[++i]; // skip ahead to next loc 1733 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 1734 HalfGPRs.getValue(1), Flag); 1735 Flag = Chain.getValue(1); 1736 VA = RVLocs[++i]; // skip ahead to next loc 1737 1738 // Extract the 2nd half and fall through to handle it as an f64 value. 1739 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1740 DAG.getConstant(1, MVT::i32)); 1741 } 1742 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 1743 // available. 1744 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1745 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 1746 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 1747 Flag = Chain.getValue(1); 1748 VA = RVLocs[++i]; // skip ahead to next loc 1749 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 1750 Flag); 1751 } else 1752 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1753 1754 // Guarantee that all emitted copies are glued 1755 // together so that nothing can be scheduled between them. 1756 Flag = Chain.getValue(1); 1757 } 1758 1759 SDValue result; 1760 if (Flag.getNode()) 1761 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 1762 else // Return Void 1763 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain); 1764 1765 return result; 1766} 1767 1768bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const { 1769 if (N->getNumValues() != 1) 1770 return false; 1771 if (!N->hasNUsesOfValue(1, 0)) 1772 return false; 1773 1774 unsigned NumCopies = 0; 1775 SDNode* Copies[2]; 1776 SDNode *Use = *N->use_begin(); 1777 if (Use->getOpcode() == ISD::CopyToReg) { 1778 Copies[NumCopies++] = Use; 1779 } else if (Use->getOpcode() == ARMISD::VMOVRRD) { 1780 // f64 returned in a pair of GPRs.
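    // Sketch of the expected DAG shape in this case:
    //   lo, hi = VMOVRRD f64val
    //   CopyToReg ..., R0, lo  and  CopyToReg ..., R1, hi
    // Both CopyToReg uses are collected below as candidate copies.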
1781 for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end(); 1782 UI != UE; ++UI) { 1783 if (UI->getOpcode() != ISD::CopyToReg) 1784 return false; 1785 Copies[UI.getUse().getResNo()] = *UI; 1786 ++NumCopies; 1787 } 1788 } else if (Use->getOpcode() == ISD::BITCAST) { 1789 // f32 returned in a single GPR. 1790 if (!Use->hasNUsesOfValue(1, 0)) 1791 return false; 1792 Use = *Use->use_begin(); 1793 if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0)) 1794 return false; 1795 Copies[NumCopies++] = Use; 1796 } else { 1797 return false; 1798 } 1799 1800 if (NumCopies != 1 && NumCopies != 2) 1801 return false; 1802 1803 bool HasRet = false; 1804 for (unsigned i = 0; i < NumCopies; ++i) { 1805 SDNode *Copy = Copies[i]; 1806 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1807 UI != UE; ++UI) { 1808 if (UI->getOpcode() == ISD::CopyToReg) { 1809 SDNode *Use = *UI; 1810 if (Use == Copies[0] || Use == Copies[1]) 1811 continue; 1812 return false; 1813 } 1814 if (UI->getOpcode() != ARMISD::RET_FLAG) 1815 return false; 1816 HasRet = true; 1817 } 1818 } 1819 1820 return HasRet; 1821} 1822 1823bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1824 if (!EnableARMTailCalls) 1825 return false; 1826 1827 if (!CI->isTailCall()) 1828 return false; 1829 1830 return !Subtarget->isThumb1Only(); 1831} 1832 1833// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 1834// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 1835// one of the above mentioned nodes. It has to be wrapped because otherwise 1836// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 1837// be used to form addressing mode. These wrapped nodes will be selected 1838// into MOVi. 1839static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 1840 EVT PtrVT = Op.getValueType(); 1841 // FIXME there is no actual debug info here 1842 DebugLoc dl = Op.getDebugLoc(); 1843 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1844 SDValue Res; 1845 if (CP->isMachineConstantPoolEntry()) 1846 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1847 CP->getAlignment()); 1848 else 1849 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1850 CP->getAlignment()); 1851 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 1852} 1853 1854unsigned ARMTargetLowering::getJumpTableEncoding() const { 1855 return MachineJumpTableInfo::EK_Inline; 1856} 1857 1858SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 1859 SelectionDAG &DAG) const { 1860 MachineFunction &MF = DAG.getMachineFunction(); 1861 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1862 unsigned ARMPCLabelIndex = 0; 1863 DebugLoc DL = Op.getDebugLoc(); 1864 EVT PtrVT = getPointerTy(); 1865 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1866 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1867 SDValue CPAddr; 1868 if (RelocM == Reloc::Static) { 1869 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 1870 } else { 1871 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1872 ARMPCLabelIndex = AFI->createPICLabelUId(); 1873 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex, 1874 ARMCP::CPBlockAddress, 1875 PCAdj); 1876 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1877 } 1878 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 1879 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 1880 MachinePointerInfo::getConstantPool(), 1881 false, false, 0); 1882 if (RelocM == Reloc::Static) 1883 return Result; 1884 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1885 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 1886} 1887 1888// Lower ISD::GlobalTLSAddress using the "general dynamic" model 1889SDValue 1890ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 1891 SelectionDAG &DAG) const { 1892 DebugLoc dl = GA->getDebugLoc(); 1893 EVT PtrVT = getPointerTy(); 1894 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 1895 MachineFunction &MF = DAG.getMachineFunction(); 1896 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1897 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1898 ARMConstantPoolValue *CPV = 1899 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1900 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 1901 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1902 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 1903 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 1904 MachinePointerInfo::getConstantPool(), 1905 false, false, 0); 1906 SDValue Chain = Argument.getValue(1); 1907 1908 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1909 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 1910 1911 // call __tls_get_addr. 1912 ArgListTy Args; 1913 ArgListEntry Entry; 1914 Entry.Node = Argument; 1915 Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext()); 1916 Args.push_back(Entry); 1917 // FIXME: is there useful debug info available here? 1918 std::pair<SDValue, SDValue> CallResult = 1919 LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), 1920 false, false, false, false, 1921 0, CallingConv::C, false, /*isReturnValueUsed=*/true, 1922 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 1923 return CallResult.first; 1924} 1925 1926// Lower ISD::GlobalTLSAddress using the "initial exec" or 1927// "local exec" model. 1928SDValue 1929ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 1930 SelectionDAG &DAG) const { 1931 const GlobalValue *GV = GA->getGlobal(); 1932 DebugLoc dl = GA->getDebugLoc(); 1933 SDValue Offset; 1934 SDValue Chain = DAG.getEntryNode(); 1935 EVT PtrVT = getPointerTy(); 1936 // Get the Thread Pointer 1937 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1938 1939 if (GV->isDeclaration()) { 1940 MachineFunction &MF = DAG.getMachineFunction(); 1941 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1942 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1943 // Initial exec model. 1944 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 1945 ARMConstantPoolValue *CPV = 1946 new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, 1947 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, true); 1948 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1949 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1950 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1951 MachinePointerInfo::getConstantPool(), 1952 false, false, 0); 1953 Chain = Offset.getValue(1); 1954 1955 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1956 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 1957 1958 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1959 MachinePointerInfo::getConstantPool(), 1960 false, false, 0); 1961 } else { 1962 // local exec model 1963 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::TPOFF); 1964 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1965 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 1966 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 1967 MachinePointerInfo::getConstantPool(), 1968 false, false, 0); 1969 } 1970 1971 // The address of the thread local variable is the add of the thread 1972 // pointer with the offset of the variable. 1973 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 1974} 1975 1976SDValue 1977ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 1978 // TODO: implement the "local dynamic" model 1979 assert(Subtarget->isTargetELF() && 1980 "TLS not implemented for non-ELF targets"); 1981 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1982 // If the relocation model is PIC, use the "General Dynamic" TLS Model, 1983 // otherwise use the "Local Exec" TLS Model 1984 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) 1985 return LowerToTLSGeneralDynamicModel(GA, DAG); 1986 else 1987 return LowerToTLSExecModels(GA, DAG); 1988} 1989 1990SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 1991 SelectionDAG &DAG) const { 1992 EVT PtrVT = getPointerTy(); 1993 DebugLoc dl = Op.getDebugLoc(); 1994 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1995 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1996 if (RelocM == Reloc::PIC_) { 1997 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 1998 ARMConstantPoolValue *CPV = 1999 new ARMConstantPoolValue(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2000 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2001 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2002 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2003 CPAddr, 2004 MachinePointerInfo::getConstantPool(), 2005 false, false, 0); 2006 SDValue Chain = Result.getValue(1); 2007 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2008 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2009 if (!UseGOTOFF) 2010 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2011 MachinePointerInfo::getGOT(), false, false, 0); 2012 return Result; 2013 } 2014 2015 // If we have T2 ops, we can materialize the address directly via movt/movw 2016 // pair. This is always cheaper. 2017 if (Subtarget->useMovt()) { 2018 ++NumMovwMovt; 2019 // FIXME: Once remat is capable of dealing with instructions with register 2020 // operands, expand this into two nodes. 
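    // With movw/movt the address is materialized inline, roughly:
    //   movw rX, :lower16:global
    //   movt rX, :upper16:global
    // instead of being loaded from a constant-pool entry.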
2021 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2022 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2023 } else { 2024 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2025 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2026 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2027 MachinePointerInfo::getConstantPool(), 2028 false, false, 0); 2029 } 2030} 2031 2032SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2033 SelectionDAG &DAG) const { 2034 EVT PtrVT = getPointerTy(); 2035 DebugLoc dl = Op.getDebugLoc(); 2036 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2037 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2038 MachineFunction &MF = DAG.getMachineFunction(); 2039 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2040 2041 if (Subtarget->useMovt()) { 2042 ++NumMovwMovt; 2043 // FIXME: Once remat is capable of dealing with instructions with register 2044 // operands, expand this into two nodes. 2045 if (RelocM == Reloc::Static) 2046 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2047 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2048 2049 unsigned Wrapper = (RelocM == Reloc::PIC_) 2050 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2051 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2052 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2053 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2054 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2055 MachinePointerInfo::getGOT(), false, false, 0); 2056 return Result; 2057 } 2058 2059 unsigned ARMPCLabelIndex = 0; 2060 SDValue CPAddr; 2061 if (RelocM == Reloc::Static) { 2062 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2063 } else { 2064 ARMPCLabelIndex = AFI->createPICLabelUId(); 2065 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2066 ARMConstantPoolValue *CPV = 2067 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 2068 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2069 } 2070 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2071 2072 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2073 MachinePointerInfo::getConstantPool(), 2074 false, false, 0); 2075 SDValue Chain = Result.getValue(1); 2076 2077 if (RelocM == Reloc::PIC_) { 2078 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2079 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2080 } 2081 2082 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2083 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2084 false, false, 0); 2085 2086 return Result; 2087} 2088 2089SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2090 SelectionDAG &DAG) const { 2091 assert(Subtarget->isTargetELF() && 2092 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2093 MachineFunction &MF = DAG.getMachineFunction(); 2094 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2095 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2096 EVT PtrVT = getPointerTy(); 2097 DebugLoc dl = Op.getDebugLoc(); 2098 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2099 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 2100 "_GLOBAL_OFFSET_TABLE_", 2101 ARMPCLabelIndex, PCAdj); 2102 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2103 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2104 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2105 MachinePointerInfo::getConstantPool(), 2106 false, false, 0); 2107 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2108 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2109} 2110 2111SDValue 2112ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) 2113 const { 2114 DebugLoc dl = Op.getDebugLoc(); 2115 return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other, 2116 Op.getOperand(0)); 2117} 2118 2119SDValue 2120ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2121 DebugLoc dl = Op.getDebugLoc(); 2122 SDValue Val = DAG.getConstant(0, MVT::i32); 2123 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0), 2124 Op.getOperand(1), Val); 2125} 2126 2127SDValue 2128ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2129 DebugLoc dl = Op.getDebugLoc(); 2130 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2131 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2132} 2133 2134SDValue 2135ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2136 const ARMSubtarget *Subtarget) const { 2137 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2138 DebugLoc dl = Op.getDebugLoc(); 2139 switch (IntNo) { 2140 default: return SDValue(); // Don't custom lower most intrinsics. 2141 case Intrinsic::arm_thread_pointer: { 2142 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2143 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2144 } 2145 case Intrinsic::eh_sjlj_lsda: { 2146 MachineFunction &MF = DAG.getMachineFunction(); 2147 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2148 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2149 EVT PtrVT = getPointerTy(); 2150 DebugLoc dl = Op.getDebugLoc(); 2151 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2152 SDValue CPAddr; 2153 unsigned PCAdj = (RelocM != Reloc::PIC_) 2154 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2155 ARMConstantPoolValue *CPV = 2156 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 2157 ARMCP::CPLSDA, PCAdj); 2158 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2159 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2160 SDValue Result = 2161 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2162 MachinePointerInfo::getConstantPool(), 2163 false, false, 0); 2164 2165 if (RelocM == Reloc::PIC_) { 2166 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2167 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2168 } 2169 return Result; 2170 } 2171 case Intrinsic::arm_neon_vmulls: 2172 case Intrinsic::arm_neon_vmullu: { 2173 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2174 ? ARMISD::VMULLs : ARMISD::VMULLu; 2175 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2176 Op.getOperand(1), Op.getOperand(2)); 2177 } 2178 } 2179} 2180 2181static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG, 2182 const ARMSubtarget *Subtarget) { 2183 DebugLoc dl = Op.getDebugLoc(); 2184 if (!Subtarget->hasDataBarrier()) { 2185 // Some ARMv6 cpus can support data barriers with an mcr instruction. 
// Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2187 // here. 2188 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2189 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2190 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2191 DAG.getConstant(0, MVT::i32)); 2192 } 2193 2194 SDValue Op5 = Op.getOperand(5); 2195 bool isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue() != 0; 2196 unsigned isLL = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2197 unsigned isLS = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2198 bool isOnlyStoreBarrier = (isLL == 0 && isLS == 0); 2199 2200 ARM_MB::MemBOpt DMBOpt; 2201 if (isDeviceBarrier) 2202 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ST : ARM_MB::SY; 2203 else 2204 DMBOpt = isOnlyStoreBarrier ? ARM_MB::ISHST : ARM_MB::ISH; 2205 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2206 DAG.getConstant(DMBOpt, MVT::i32)); 2207} 2208 2209static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2210 const ARMSubtarget *Subtarget) { 2211 // Pre-v5TE ARM and Thumb1 do not have preload instructions. 2212 if (!(Subtarget->isThumb2() || 2213 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2214 // Just preserve the chain. 2215 return Op.getOperand(0); 2216 2217 DebugLoc dl = Op.getDebugLoc(); 2218 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2219 if (!isRead && 2220 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2221 // ARMv7 with MP extension has PLDW. 2222 return Op.getOperand(0); 2223 2224 if (Subtarget->isThumb()) 2225 // Invert the bits. 2226 isRead = ~isRead & 1; 2227 unsigned isData = Subtarget->isThumb() ? 0 : 1; 2228 2229 // Currently there is no intrinsic that matches pli. 2230 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2231 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2232 DAG.getConstant(isData, MVT::i32)); 2233} 2234 2235static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2236 MachineFunction &MF = DAG.getMachineFunction(); 2237 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2238 2239 // vastart just stores the address of the VarArgsFrameIndex slot into the 2240 // memory location argument. 2241 DebugLoc dl = Op.getDebugLoc(); 2242 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2243 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2244 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2245 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2246 MachinePointerInfo(SV), false, false, 0); 2247} 2248 2249SDValue 2250ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2251 SDValue &Root, SelectionDAG &DAG, 2252 DebugLoc dl) const { 2253 MachineFunction &MF = DAG.getMachineFunction(); 2254 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2255 2256 TargetRegisterClass *RC; 2257 if (AFI->isThumb1OnlyFunction()) 2258 RC = ARM::tGPRRegisterClass; 2259 else 2260 RC = ARM::GPRRegisterClass; 2261 2262 // Transform the arguments stored in physical registers into virtual ones.
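  // Sketch: an f64 formal argument split across r0/r1 becomes, roughly,
  //   lo = CopyFromReg vreg(R0)
  //   hi = CopyFromReg vreg(R1)   (or a stack load; see NextVA handling below)
  //   arg = VMOVDRR lo, hi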
2263 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2264 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2265 2266 SDValue ArgValue2; 2267 if (NextVA.isMemLoc()) { 2268 MachineFrameInfo *MFI = MF.getFrameInfo(); 2269 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2270 2271 // Create load node to retrieve arguments from the stack. 2272 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2273 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2274 MachinePointerInfo::getFixedStack(FI), 2275 false, false, 0); 2276 } else { 2277 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2278 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2279 } 2280 2281 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2282} 2283 2284SDValue 2285ARMTargetLowering::LowerFormalArguments(SDValue Chain, 2286 CallingConv::ID CallConv, bool isVarArg, 2287 const SmallVectorImpl<ISD::InputArg> 2288 &Ins, 2289 DebugLoc dl, SelectionDAG &DAG, 2290 SmallVectorImpl<SDValue> &InVals) 2291 const { 2292 2293 MachineFunction &MF = DAG.getMachineFunction(); 2294 MachineFrameInfo *MFI = MF.getFrameInfo(); 2295 2296 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2297 2298 // Assign locations to all of the incoming arguments. 2299 SmallVector<CCValAssign, 16> ArgLocs; 2300 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 2301 *DAG.getContext()); 2302 CCInfo.AnalyzeFormalArguments(Ins, 2303 CCAssignFnForNode(CallConv, /* Return*/ false, 2304 isVarArg)); 2305 2306 SmallVector<SDValue, 16> ArgValues; 2307 int lastInsIndex = -1; 2308 2309 SDValue ArgValue; 2310 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2311 CCValAssign &VA = ArgLocs[i]; 2312 2313 // Arguments stored in registers. 2314 if (VA.isRegLoc()) { 2315 EVT RegVT = VA.getLocVT(); 2316 2317 if (VA.needsCustom()) { 2318 // f64 and vector types are split up into multiple registers or 2319 // combinations of registers and stack slots. 2320 if (VA.getLocVT() == MVT::v2f64) { 2321 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2322 Chain, DAG, dl); 2323 VA = ArgLocs[++i]; // skip ahead to next loc 2324 SDValue ArgValue2; 2325 if (VA.isMemLoc()) { 2326 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2327 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2328 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2329 MachinePointerInfo::getFixedStack(FI), 2330 false, false, 0); 2331 } else { 2332 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2333 Chain, DAG, dl); 2334 } 2335 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2336 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2337 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2338 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2339 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2340 } else 2341 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2342 2343 } else { 2344 TargetRegisterClass *RC; 2345 2346 if (RegVT == MVT::f32) 2347 RC = ARM::SPRRegisterClass; 2348 else if (RegVT == MVT::f64) 2349 RC = ARM::DPRRegisterClass; 2350 else if (RegVT == MVT::v2f64) 2351 RC = ARM::QPRRegisterClass; 2352 else if (RegVT == MVT::i32) 2353 RC = (AFI->isThumb1OnlyFunction() ? 2354 ARM::tGPRRegisterClass : ARM::GPRRegisterClass); 2355 else 2356 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2357 2358 // Transform the arguments in physical registers into virtual ones. 
2359 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2360 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2361 } 2362 2363 // If this is an 8 or 16-bit value, it is really passed promoted 2364 // to 32 bits. Insert an assert[sz]ext to capture this, then 2365 // truncate to the right size. 2366 switch (VA.getLocInfo()) { 2367 default: llvm_unreachable("Unknown loc info!"); 2368 case CCValAssign::Full: break; 2369 case CCValAssign::BCvt: 2370 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2371 break; 2372 case CCValAssign::SExt: 2373 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2374 DAG.getValueType(VA.getValVT())); 2375 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2376 break; 2377 case CCValAssign::ZExt: 2378 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2379 DAG.getValueType(VA.getValVT())); 2380 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2381 break; 2382 } 2383 2384 InVals.push_back(ArgValue); 2385 2386 } else { // !VA.isRegLoc() 2387 2388 // sanity check 2389 assert(VA.isMemLoc()); 2390 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2391 2392 int index = ArgLocs[i].getValNo(); 2393 2394 // Some Ins[] entries become multiple ArgLoc[] entries. 2395 // Process them only once. 2396 if (index != lastInsIndex) 2397 { 2398 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2399 // FIXME: For now, all byval parameter objects are marked mutable. This can be 2400 // changed with more analysis. 2401 // In case of tail call optimization mark all arguments mutable, since they 2402 // could be overwritten by the lowering of arguments in case of a tail call. 2403 if (Flags.isByVal()) { 2404 unsigned Bytes = Flags.getByValSize(); 2405 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 2406 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), false); 2407 InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); 2408 } else { 2409 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2410 VA.getLocMemOffset(), true); 2411 2412 // Create load nodes to retrieve arguments from the stack. 2413 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2414 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2415 MachinePointerInfo::getFixedStack(FI), 2416 false, false, 0)); 2417 } 2418 lastInsIndex = index; 2419 } 2420 } 2421 } 2422 2423 // varargs 2424 if (isVarArg) { 2425 static const unsigned GPRArgRegs[] = { 2426 ARM::R0, ARM::R1, ARM::R2, ARM::R3 2427 }; 2428 2429 unsigned NumGPRs = CCInfo.getFirstUnallocated 2430 (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0])); 2431 2432 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2433 unsigned VARegSize = (4 - NumGPRs) * 4; 2434 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1); 2435 unsigned ArgOffset = CCInfo.getNextStackOffset(); 2436 if (VARegSaveSize) { 2437 // If this function is vararg, store any remaining integer argument regs 2438 // to their spots on the stack so that they may be loaded by dereferencing 2439 // the result of va_next.
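    // Illustrative: for "void f(int a, ...)" a occupies r0, so r1-r3
    // (VARegSize == 12 in that case) are spilled into a VARegSaveSize-byte
    // save area positioned so the spilled registers abut the first
    // stack-passed vararg, letting va_arg walk all of them contiguously.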
2440 AFI->setVarArgsRegSaveSize(VARegSaveSize); 2441 AFI->setVarArgsFrameIndex( 2442 MFI->CreateFixedObject(VARegSaveSize, 2443 ArgOffset + VARegSaveSize - VARegSize, 2444 false)); 2445 SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), 2446 getPointerTy()); 2447 2448 SmallVector<SDValue, 4> MemOps; 2449 for (; NumGPRs < 4; ++NumGPRs) { 2450 TargetRegisterClass *RC; 2451 if (AFI->isThumb1OnlyFunction()) 2452 RC = ARM::tGPRRegisterClass; 2453 else 2454 RC = ARM::GPRRegisterClass; 2455 2456 unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC); 2457 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2458 SDValue Store = 2459 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2460 MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), 2461 false, false, 0); 2462 MemOps.push_back(Store); 2463 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, 2464 DAG.getConstant(4, getPointerTy())); 2465 } 2466 if (!MemOps.empty()) 2467 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2468 &MemOps[0], MemOps.size()); 2469 } else 2470 // This will point to the next argument passed via stack. 2471 AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); 2472 } 2473 2474 return Chain; 2475} 2476 2477/// isFloatingPointZero - Return true if this is +0.0. 2478static bool isFloatingPointZero(SDValue Op) { 2479 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2480 return CFP->getValueAPF().isPosZero(); 2481 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2482 // Maybe this has already been legalized into the constant pool? 2483 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2484 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2485 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2486 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2487 return CFP->getValueAPF().isPosZero(); 2488 } 2489 } 2490 return false; 2491} 2492 2493/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2494/// the given operands. 2495SDValue 2496ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2497 SDValue &ARMcc, SelectionDAG &DAG, 2498 DebugLoc dl) const { 2499 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2500 unsigned C = RHSC->getZExtValue(); 2501 if (!isLegalICmpImmediate(C)) { 2502 // Constant does not fit, try adjusting it by one? 2503 switch (CC) { 2504 default: break; 2505 case ISD::SETLT: 2506 case ISD::SETGE: 2507 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2508 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2509 RHS = DAG.getConstant(C-1, MVT::i32); 2510 } 2511 break; 2512 case ISD::SETULT: 2513 case ISD::SETUGE: 2514 if (C != 0 && isLegalICmpImmediate(C-1)) { 2515 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2516 RHS = DAG.getConstant(C-1, MVT::i32); 2517 } 2518 break; 2519 case ISD::SETLE: 2520 case ISD::SETGT: 2521 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2522 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2523 RHS = DAG.getConstant(C+1, MVT::i32); 2524 } 2525 break; 2526 case ISD::SETULE: 2527 case ISD::SETUGT: 2528 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2529 CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 2530 RHS = DAG.getConstant(C+1, MVT::i32); 2531 } 2532 break; 2533 } 2534 } 2535 } 2536 2537 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2538 ARMISD::NodeType CompareType; 2539 switch (CondCode) { 2540 default: 2541 CompareType = ARMISD::CMP; 2542 break; 2543 case ARMCC::EQ: 2544 case ARMCC::NE: 2545 // Uses only Z Flag 2546 CompareType = ARMISD::CMPZ; 2547 break; 2548 } 2549 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2550 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2551} 2552 2553/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 2554SDValue 2555ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 2556 DebugLoc dl) const { 2557 SDValue Cmp; 2558 if (!isFloatingPointZero(RHS)) 2559 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 2560 else 2561 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 2562 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 2563} 2564 2565/// duplicateCmp - Glue values can have only one use, so this function 2566/// duplicates a comparison node. 2567SDValue 2568ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 2569 unsigned Opc = Cmp.getOpcode(); 2570 DebugLoc DL = Cmp.getDebugLoc(); 2571 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 2572 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), Cmp.getOperand(1)); 2573 2574 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 2575 Cmp = Cmp.getOperand(0); 2576 Opc = Cmp.getOpcode(); 2577 if (Opc == ARMISD::CMPFP) 2578 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), Cmp.getOperand(1)); 2579 else { 2580 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 2581 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 2582 } 2583 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 2584} 2585 2586SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2587 SDValue Cond = Op.getOperand(0); 2588 SDValue SelectTrue = Op.getOperand(1); 2589 SDValue SelectFalse = Op.getOperand(2); 2590 DebugLoc dl = Op.getDebugLoc(); 2591 2592 // Convert: 2593 // 2594 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 2595 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 2596 // 2597 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 2598 const ConstantSDNode *CMOVTrue = 2599 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 2600 const ConstantSDNode *CMOVFalse = 2601 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2602 2603 if (CMOVTrue && CMOVFalse) { 2604 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 2605 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 2606 2607 SDValue True; 2608 SDValue False; 2609 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 2610 True = SelectTrue; 2611 False = SelectFalse; 2612 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 2613 True = SelectFalse; 2614 False = SelectTrue; 2615 } 2616 2617 if (True.getNode() && False.getNode()) { 2618 EVT VT = Cond.getValueType(); 2619 SDValue ARMcc = Cond.getOperand(2); 2620 SDValue CCR = Cond.getOperand(3); 2621 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 2622 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 2623 } 2624 } 2625 } 2626 2627 return DAG.getSelectCC(dl, Cond, 2628 DAG.getConstant(0, Cond.getValueType()), 2629 SelectTrue, SelectFalse, ISD::SETNE); 2630} 2631 2632SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2633 EVT VT = Op.getValueType(); 2634 SDValue LHS = Op.getOperand(0); 2635
SDValue RHS = Op.getOperand(1); 2636 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2637 SDValue TrueVal = Op.getOperand(2); 2638 SDValue FalseVal = Op.getOperand(3); 2639 DebugLoc dl = Op.getDebugLoc(); 2640 2641 if (LHS.getValueType() == MVT::i32) { 2642 SDValue ARMcc; 2643 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2644 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2645 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 2646 } 2647 2648 ARMCC::CondCodes CondCode, CondCode2; 2649 FPCCToARMCC(CC, CondCode, CondCode2); 2650 2651 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2652 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2653 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2654 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 2655 ARMcc, CCR, Cmp); 2656 if (CondCode2 != ARMCC::AL) { 2657 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 2658 // FIXME: Needs another CMP because flag can have but one use. 2659 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 2660 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 2661 Result, TrueVal, ARMcc2, CCR, Cmp2); 2662 } 2663 return Result; 2664} 2665 2666/// canChangeToInt - Given the fp compare operand, return true if it is suitable 2667/// to morph to an integer compare sequence. 2668static bool canChangeToInt(SDValue Op, bool &SeenZero, 2669 const ARMSubtarget *Subtarget) { 2670 SDNode *N = Op.getNode(); 2671 if (!N->hasOneUse()) 2672 // Otherwise it requires moving the value from fp to integer registers. 2673 return false; 2674 if (!N->getNumValues()) 2675 return false; 2676 EVT VT = Op.getValueType(); 2677 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 2678 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 2679 // vmrs are very slow, e.g. cortex-a8. 
2680 return false; 2681 2682 if (isFloatingPointZero(Op)) { 2683 SeenZero = true; 2684 return true; 2685 } 2686 return ISD::isNormalLoad(N); 2687} 2688 2689static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 2690 if (isFloatingPointZero(Op)) 2691 return DAG.getConstant(0, MVT::i32); 2692 2693 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 2694 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2695 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 2696 Ld->isVolatile(), Ld->isNonTemporal(), 2697 Ld->getAlignment()); 2698 2699 llvm_unreachable("Unknown VFP cmp argument!"); 2700} 2701 2702static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 2703 SDValue &RetVal1, SDValue &RetVal2) { 2704 if (isFloatingPointZero(Op)) { 2705 RetVal1 = DAG.getConstant(0, MVT::i32); 2706 RetVal2 = DAG.getConstant(0, MVT::i32); 2707 return; 2708 } 2709 2710 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 2711 SDValue Ptr = Ld->getBasePtr(); 2712 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2713 Ld->getChain(), Ptr, 2714 Ld->getPointerInfo(), 2715 Ld->isVolatile(), Ld->isNonTemporal(), 2716 Ld->getAlignment()); 2717 2718 EVT PtrType = Ptr.getValueType(); 2719 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 2720 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 2721 PtrType, Ptr, DAG.getConstant(4, PtrType)); 2722 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 2723 Ld->getChain(), NewPtr, 2724 Ld->getPointerInfo().getWithOffset(4), 2725 Ld->isVolatile(), Ld->isNonTemporal(), 2726 NewAlign); 2727 return; 2728 } 2729 2730 llvm_unreachable("Unknown VFP cmp argument!"); 2731} 2732 2733/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 2734/// f32 and even f64 comparisons to integer ones. 2735SDValue 2736ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 2737 SDValue Chain = Op.getOperand(0); 2738 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2739 SDValue LHS = Op.getOperand(2); 2740 SDValue RHS = Op.getOperand(3); 2741 SDValue Dest = Op.getOperand(4); 2742 DebugLoc dl = Op.getDebugLoc(); 2743 2744 bool SeenZero = false; 2745 if (canChangeToInt(LHS, SeenZero, Subtarget) && 2746 canChangeToInt(RHS, SeenZero, Subtarget) && 2747 // If one of the operands is zero, it's safe to ignore the NaN case since 2748 // we only care about equality comparisons. 2749 (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) { 2750 // If unsafe fp math optimization is enabled and there are no other uses of 2751 // the CMP operands, and the condition code is EQ or NE, we can optimize it 2752 // to an integer comparison.
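    // Sketch: under unsafe-fp-math, (setoeq f32 %a, %b) becomes
    // (seteq (bitcast %a to i32), (bitcast %b to i32)); for EQ/NE the bit
    // patterns decide the result, and NaN / signed-zero corner cases are
    // deliberately ignored.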
2753 if (CC == ISD::SETOEQ) 2754 CC = ISD::SETEQ; 2755 else if (CC == ISD::SETUNE) 2756 CC = ISD::SETNE; 2757 2758 SDValue ARMcc; 2759 if (LHS.getValueType() == MVT::f32) { 2760 LHS = bitcastf32Toi32(LHS, DAG); 2761 RHS = bitcastf32Toi32(RHS, DAG); 2762 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2763 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2764 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2765 Chain, Dest, ARMcc, CCR, Cmp); 2766 } 2767 2768 SDValue LHS1, LHS2; 2769 SDValue RHS1, RHS2; 2770 expandf64Toi32(LHS, DAG, LHS1, LHS2); 2771 expandf64Toi32(RHS, DAG, RHS1, RHS2); 2772 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2773 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2774 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2775 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 2776 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 2777 } 2778 2779 return SDValue(); 2780} 2781 2782SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2783 SDValue Chain = Op.getOperand(0); 2784 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2785 SDValue LHS = Op.getOperand(2); 2786 SDValue RHS = Op.getOperand(3); 2787 SDValue Dest = Op.getOperand(4); 2788 DebugLoc dl = Op.getDebugLoc(); 2789 2790 if (LHS.getValueType() == MVT::i32) { 2791 SDValue ARMcc; 2792 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 2793 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2794 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 2795 Chain, Dest, ARMcc, CCR, Cmp); 2796 } 2797 2798 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 2799 2800 if (UnsafeFPMath && 2801 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 2802 CC == ISD::SETNE || CC == ISD::SETUNE)) { 2803 SDValue Result = OptimizeVFPBrcond(Op, DAG); 2804 if (Result.getNode()) 2805 return Result; 2806 } 2807 2808 ARMCC::CondCodes CondCode, CondCode2; 2809 FPCCToARMCC(CC, CondCode, CondCode2); 2810 2811 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 2812 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 2813 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 2814 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 2815 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 2816 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2817 if (CondCode2 != ARMCC::AL) { 2818 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 2819 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 2820 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 2821 } 2822 return Res; 2823} 2824 2825SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 2826 SDValue Chain = Op.getOperand(0); 2827 SDValue Table = Op.getOperand(1); 2828 SDValue Index = Op.getOperand(2); 2829 DebugLoc dl = Op.getDebugLoc(); 2830 2831 EVT PTy = getPointerTy(); 2832 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 2833 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2834 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 2835 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 2836 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 2837 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 2838 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2839 if (Subtarget->isThumb2()) { 2840 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 2841 // which does another jump to the destination. 
This also makes it easier 2842 // to translate it to TBB / TBH later. 2843 // FIXME: This might not work if the function is extremely large. 2844 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 2845 Addr, Op.getOperand(2), JTI, UId); 2846 } 2847 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2848 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 2849 MachinePointerInfo::getJumpTable(), 2850 false, false, 0); 2851 Chain = Addr.getValue(1); 2852 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 2853 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2854 } else { 2855 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 2856 MachinePointerInfo::getJumpTable(), false, false, 0); 2857 Chain = Addr.getValue(1); 2858 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 2859 } 2860} 2861 2862static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2863 DebugLoc dl = Op.getDebugLoc(); 2864 unsigned Opc; 2865 2866 switch (Op.getOpcode()) { 2867 default: 2868 assert(0 && "Invalid opcode!"); 2869 case ISD::FP_TO_SINT: 2870 Opc = ARMISD::FTOSI; 2871 break; 2872 case ISD::FP_TO_UINT: 2873 Opc = ARMISD::FTOUI; 2874 break; 2875 } 2876 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 2877 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 2878} 2879 2880static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2881 EVT VT = Op.getValueType(); 2882 DebugLoc dl = Op.getDebugLoc(); 2883 2884 EVT OperandVT = Op.getOperand(0).getValueType(); 2885 assert(OperandVT == MVT::v4i16 && "Invalid type for custom lowering!"); 2886 if (VT != MVT::v4f32) 2887 return DAG.UnrollVectorOp(Op.getNode()); 2888 2889 unsigned CastOpc; 2890 unsigned Opc; 2891 switch (Op.getOpcode()) { 2892 default: 2893 assert(0 && "Invalid opcode!"); 2894 case ISD::SINT_TO_FP: 2895 CastOpc = ISD::SIGN_EXTEND; 2896 Opc = ISD::SINT_TO_FP; 2897 break; 2898 case ISD::UINT_TO_FP: 2899 CastOpc = ISD::ZERO_EXTEND; 2900 Opc = ISD::UINT_TO_FP; 2901 break; 2902 } 2903 2904 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 2905 return DAG.getNode(Opc, dl, VT, Op); 2906} 2907 2908static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2909 EVT VT = Op.getValueType(); 2910 if (VT.isVector()) 2911 return LowerVectorINT_TO_FP(Op, DAG); 2912 2913 DebugLoc dl = Op.getDebugLoc(); 2914 unsigned Opc; 2915 2916 switch (Op.getOpcode()) { 2917 default: 2918 assert(0 && "Invalid opcode!"); 2919 case ISD::SINT_TO_FP: 2920 Opc = ARMISD::SITOF; 2921 break; 2922 case ISD::UINT_TO_FP: 2923 Opc = ARMISD::UITOF; 2924 break; 2925 } 2926 2927 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 2928 return DAG.getNode(Opc, dl, VT, Op); 2929} 2930 2931SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 2932 // Implement fcopysign with a fabs and a conditional fneg. 2933 SDValue Tmp0 = Op.getOperand(0); 2934 SDValue Tmp1 = Op.getOperand(1); 2935 DebugLoc dl = Op.getDebugLoc(); 2936 EVT VT = Op.getValueType(); 2937 EVT SrcVT = Tmp1.getValueType(); 2938 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 2939 Tmp0.getOpcode() == ARMISD::VMOVDRR; 2940 bool UseNEON = !InGPR && Subtarget->hasNEON(); 2941 2942 if (UseNEON) { 2943 // Use VBSL to copy the sign bit. 2944 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 2945 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 2946 DAG.getTargetConstant(EncodedVal, MVT::i32)); 2947 EVT OpVT = (VT == MVT::f32) ? 
MVT::v2i32 : MVT::v1i64; 2948 if (VT == MVT::f64) 2949 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 2950 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 2951 DAG.getConstant(32, MVT::i32)); 2952 else /*if (VT == MVT::f32)*/ 2953 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 2954 if (SrcVT == MVT::f32) { 2955 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 2956 if (VT == MVT::f64) 2957 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 2958 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 2959 DAG.getConstant(32, MVT::i32)); 2960 } 2961 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 2962 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 2963 2964 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 2965 MVT::i32); 2966 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 2967 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 2968 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 2969 2970 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 2971 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 2972 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 2973 if (VT == MVT::f32) { 2974 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 2975 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 2976 DAG.getConstant(0, MVT::i32)); 2977 } else { 2978 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 2979 } 2980 2981 return Res; 2982 } 2983 2984 // Bitcast operand 1 to i32. 2985 if (SrcVT == MVT::f64) 2986 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 2987 &Tmp1, 1).getValue(1); 2988 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 2989 2990 // Or in the signbit with integer operations. 2991 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 2992 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 2993 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 2994 if (VT == MVT::f32) { 2995 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 2996 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 2997 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 2998 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 2999 } 3000 3001 // f64: Or the high part with signbit and then combine two parts. 3002 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3003 &Tmp0, 1); 3004 SDValue Lo = Tmp0.getValue(0); 3005 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3006 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3007 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3008} 3009 3010SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3011 MachineFunction &MF = DAG.getMachineFunction(); 3012 MachineFrameInfo *MFI = MF.getFrameInfo(); 3013 MFI->setReturnAddressIsTaken(true); 3014 3015 EVT VT = Op.getValueType(); 3016 DebugLoc dl = Op.getDebugLoc(); 3017 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3018 if (Depth) { 3019 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3020 SDValue Offset = DAG.getConstant(4, MVT::i32); 3021 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3022 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3023 MachinePointerInfo(), false, false, 0); 3024 } 3025 3026 // Return LR, which contains the return address. Mark it an implicit live-in. 
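// addLiveIn registers LR as live into the function and returns a fresh
// virtual register that is copied from the physical LR at entry, so the
// value read here is unaffected if LR is later spilled or clobbered.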
3027 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3028 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3029} 3030 3031SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3032 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3033 MFI->setFrameAddressIsTaken(true); 3034 3035 EVT VT = Op.getValueType(); 3036 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3037 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3038 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3039 ? ARM::R7 : ARM::R11; 3040 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3041 while (Depth--) 3042 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3043 MachinePointerInfo(), 3044 false, false, 0); 3045 return FrameAddr; 3046} 3047 3048/// ExpandBITCAST - If the target supports VFP, this function is called to 3049/// expand a bit convert where either the source or destination type is i64 to 3050/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3051/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3052/// vectors), since the legalizer won't know what to do with that. 3053static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3054 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3055 DebugLoc dl = N->getDebugLoc(); 3056 SDValue Op = N->getOperand(0); 3057 3058 // This function is only supposed to be called for i64 types, either as the 3059 // source or destination of the bit convert. 3060 EVT SrcVT = Op.getValueType(); 3061 EVT DstVT = N->getValueType(0); 3062 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3063 "ExpandBITCAST called for non-i64 type"); 3064 3065 // Turn i64->f64 into VMOVDRR. 3066 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3067 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3068 DAG.getConstant(0, MVT::i32)); 3069 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3070 DAG.getConstant(1, MVT::i32)); 3071 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3072 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3073 } 3074 3075 // Turn f64->i64 into VMOVRRD. 3076 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3077 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3078 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3079 // Merge the pieces into a single i64 value. 3080 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3081 } 3082 3083 return SDValue(); 3084} 3085 3086/// getZeroVector - Returns a vector of specified type with all zero elements. 3087/// Zero vectors are used to represent vector negation and in those cases 3088/// will be implemented with the NEON VNEG instruction. However, VNEG does 3089/// not support i64 elements, so sometimes the zero vectors will need to be 3090/// explicitly constructed. Regardless, use a canonical VMOV to create the 3091/// zero vector. 3092static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3093 assert(VT.isVector() && "Expected a vector type"); 3094 // The canonical modified immediate encoding of a zero vector is....0! 3095 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3096 EVT VmovVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 3097 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3098 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3099} 3100 3101/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two 3102/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3103SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3104 SelectionDAG &DAG) const { 3105 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3106 EVT VT = Op.getValueType(); 3107 unsigned VTBits = VT.getSizeInBits(); 3108 DebugLoc dl = Op.getDebugLoc(); 3109 SDValue ShOpLo = Op.getOperand(0); 3110 SDValue ShOpHi = Op.getOperand(1); 3111 SDValue ShAmt = Op.getOperand(2); 3112 SDValue ARMcc; 3113 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3114 3115 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3116 3117 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3118 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3119 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3120 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3121 DAG.getConstant(VTBits, MVT::i32)); 3122 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3123 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3124 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3125 3126 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3127 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3128 ARMcc, DAG, dl); 3129 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3130 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3131 CCR, Cmp); 3132 3133 SDValue Ops[2] = { Lo, Hi }; 3134 return DAG.getMergeValues(Ops, 2, dl); 3135} 3136 3137/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3138/// i32 values and takes a 2 x i32 value to shift plus a shift amount. 3139SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3140 SelectionDAG &DAG) const { 3141 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3142 EVT VT = Op.getValueType(); 3143 unsigned VTBits = VT.getSizeInBits(); 3144 DebugLoc dl = Op.getDebugLoc(); 3145 SDValue ShOpLo = Op.getOperand(0); 3146 SDValue ShOpHi = Op.getOperand(1); 3147 SDValue ShAmt = Op.getOperand(2); 3148 SDValue ARMcc; 3149 3150 assert(Op.getOpcode() == ISD::SHL_PARTS); 3151 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3152 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3153 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3154 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3155 DAG.getConstant(VTBits, MVT::i32)); 3156 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3157 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3158 3159 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3160 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3161 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3162 ARMcc, DAG, dl); 3163 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3164 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3165 CCR, Cmp); 3166 3167 SDValue Ops[2] = { Lo, Hi }; 3168 return DAG.getMergeValues(Ops, 2, dl); 3169} 3170 3171SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3172 SelectionDAG &DAG) const { 3173 // The rounding mode is in bits 23:22 of the FPSCR.
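// (FPSCR.RMode encodes 0 = to nearest, 1 = toward +infinity, 2 = toward
// -infinity, 3 = toward zero, while FLT_ROUNDS uses 0 = toward zero,
// 1 = to nearest, 2 = toward +infinity, 3 = toward -infinity.)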
3174 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0. 3175 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3, 3176 // written so that the shift and the AND get folded into a bitfield extract. 3177 DebugLoc dl = Op.getDebugLoc(); 3178 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3179 DAG.getConstant(Intrinsic::arm_get_fpscr, 3180 MVT::i32)); 3181 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3182 DAG.getConstant(1U << 22, MVT::i32)); 3183 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3184 DAG.getConstant(22, MVT::i32)); 3185 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3186 DAG.getConstant(3, MVT::i32)); 3187} 3188 3189static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3190 const ARMSubtarget *ST) { 3191 EVT VT = N->getValueType(0); 3192 DebugLoc dl = N->getDebugLoc(); 3193 3194 if (!ST->hasV6T2Ops()) 3195 return SDValue(); 3196 3197 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3198 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3199} 3200 3201static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3202 const ARMSubtarget *ST) { 3203 EVT VT = N->getValueType(0); 3204 DebugLoc dl = N->getDebugLoc(); 3205 3206 if (!VT.isVector()) 3207 return SDValue(); 3208 3209 // Lower vector shifts on NEON to use VSHL. 3210 assert(ST->hasNEON() && "unexpected vector shift"); 3211 3212 // Left shifts translate directly to the vshiftu intrinsic. 3213 if (N->getOpcode() == ISD::SHL) 3214 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3215 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3216 N->getOperand(0), N->getOperand(1)); 3217 3218 assert((N->getOpcode() == ISD::SRA || 3219 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3220 3221 // NEON uses the same intrinsics for both left and right shifts. For 3222 // right shifts, the shift amounts are negative, so negate the vector of 3223 // shift amounts. 3224 EVT ShiftVT = N->getOperand(1).getValueType(); 3225 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3226 getZeroVector(ShiftVT, DAG, dl), 3227 N->getOperand(1)); 3228 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3229 Intrinsic::arm_neon_vshifts : 3230 Intrinsic::arm_neon_vshiftu); 3231 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3232 DAG.getConstant(vshiftInt, MVT::i32), 3233 N->getOperand(0), NegatedCount); 3234} 3235 3236static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3237 const ARMSubtarget *ST) { 3238 EVT VT = N->getValueType(0); 3239 DebugLoc dl = N->getDebugLoc(); 3240 3241 // We can get here for a node like i32 = ISD::SHL i32, i64 3242 if (VT != MVT::i64) 3243 return SDValue(); 3244 3245 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3246 "Unknown shift to lower!"); 3247 3248 // We only lower SRA, SRL of 1 here; all others use generic lowering. 3249 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3250 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3251 return SDValue(); 3252 3253 // If we are in Thumb1 mode, we don't have RRX. 3254 if (ST->isThumb1Only()) return SDValue(); 3255 3256 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
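// For a logical shift right by one, for example, the high word becomes
// hi >> 1 with the shifted-out bit left in the carry flag, and the low word
// becomes RRX(lo), i.e., (carry << 31) | (lo >> 1); typical codegen is a
// single LSRS (or ASRS) followed by an RRX.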
3257 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3258 DAG.getConstant(0, MVT::i32)); 3259 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3260 DAG.getConstant(1, MVT::i32)); 3261 3262 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3263 // captures the bit shifted out in the carry flag. 3264 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3265 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3266 3267 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3268 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3269 3270 // Merge the pieces into a single i64 value. 3271 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3272} 3273 3274static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3275 SDValue TmpOp0, TmpOp1; 3276 bool Invert = false; 3277 bool Swap = false; 3278 unsigned Opc = 0; 3279 3280 SDValue Op0 = Op.getOperand(0); 3281 SDValue Op1 = Op.getOperand(1); 3282 SDValue CC = Op.getOperand(2); 3283 EVT VT = Op.getValueType(); 3284 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3285 DebugLoc dl = Op.getDebugLoc(); 3286 3287 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3288 switch (SetCCOpcode) { 3289 default: llvm_unreachable("Illegal FP comparison"); break; 3290 case ISD::SETUNE: 3291 case ISD::SETNE: Invert = true; // Fallthrough 3292 case ISD::SETOEQ: 3293 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3294 case ISD::SETOLT: 3295 case ISD::SETLT: Swap = true; // Fallthrough 3296 case ISD::SETOGT: 3297 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3298 case ISD::SETOLE: 3299 case ISD::SETLE: Swap = true; // Fallthrough 3300 case ISD::SETOGE: 3301 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3302 case ISD::SETUGE: Swap = true; // Fallthrough 3303 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3304 case ISD::SETUGT: Swap = true; // Fallthrough 3305 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3306 case ISD::SETUEQ: Invert = true; // Fallthrough 3307 case ISD::SETONE: 3308 // Expand this to (OLT | OGT). 3309 TmpOp0 = Op0; 3310 TmpOp1 = Op1; 3311 Opc = ISD::OR; 3312 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3313 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3314 break; 3315 case ISD::SETUO: Invert = true; // Fallthrough 3316 case ISD::SETO: 3317 // Expand this to (OLT | OGE). 3318 TmpOp0 = Op0; 3319 TmpOp1 = Op1; 3320 Opc = ISD::OR; 3321 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3322 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3323 break; 3324 } 3325 } else { 3326 // Integer comparisons. 3327 switch (SetCCOpcode) { 3328 default: llvm_unreachable("Illegal integer comparison"); break; 3329 case ISD::SETNE: Invert = true; // Fallthrough 3330 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3331 case ISD::SETLT: Swap = true; // Fallthrough 3332 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3333 case ISD::SETLE: Swap = true; // Fallthrough 3334 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3335 case ISD::SETULT: Swap = true; // Fallthrough 3336 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3337 case ISD::SETULE: Swap = true; // Fallthrough 3338 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3339 } 3340 3341 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
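// For example, (v4i32 setne (and %a, %b), 0) maps directly to (VTST %a, %b),
// since VTST sets each lane to all ones exactly when the AND of the
// corresponding lanes is nonzero; the seteq form becomes VTST plus a final
// NOT via the Invert flag toggled below.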
3342 if (Opc == ARMISD::VCEQ) { 3343 3344 SDValue AndOp; 3345 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3346 AndOp = Op0; 3347 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3348 AndOp = Op1; 3349 3350 // Ignore bitconvert. 3351 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3352 AndOp = AndOp.getOperand(0); 3353 3354 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3355 Opc = ARMISD::VTST; 3356 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3357 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3358 Invert = !Invert; 3359 } 3360 } 3361 } 3362 3363 if (Swap) 3364 std::swap(Op0, Op1); 3365 3366 // If one of the operands is a constant vector zero, attempt to fold the 3367 // comparison to a specialized compare-against-zero form. 3368 SDValue SingleOp; 3369 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3370 SingleOp = Op0; 3371 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 3372 if (Opc == ARMISD::VCGE) 3373 Opc = ARMISD::VCLEZ; 3374 else if (Opc == ARMISD::VCGT) 3375 Opc = ARMISD::VCLTZ; 3376 SingleOp = Op1; 3377 } 3378 3379 SDValue Result; 3380 if (SingleOp.getNode()) { 3381 switch (Opc) { 3382 case ARMISD::VCEQ: 3383 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 3384 case ARMISD::VCGE: 3385 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 3386 case ARMISD::VCLEZ: 3387 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 3388 case ARMISD::VCGT: 3389 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 3390 case ARMISD::VCLTZ: 3391 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 3392 default: 3393 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3394 } 3395 } else { 3396 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 3397 } 3398 3399 if (Invert) 3400 Result = DAG.getNOT(dl, Result, VT); 3401 3402 return Result; 3403} 3404 3405/// isNEONModifiedImm - Check if the specified splat value corresponds to a 3406/// valid vector constant for a NEON instruction with a "modified immediate" 3407/// operand (e.g., VMOV). If so, return the encoded value. 3408static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 3409 unsigned SplatBitSize, SelectionDAG &DAG, 3410 EVT &VT, bool is128Bits, NEONModImmType type) { 3411 unsigned OpCmode, Imm; 3412 3413 // SplatBitSize is set to the smallest size that splats the vector, so a 3414 // zero vector will always have SplatBitSize == 8. However, NEON modified 3415 // immediate instructions other than VMOV do not support the 8-bit encoding 3416 // of a zero vector, and the default encoding of zero is supposed to be the 3417 // 32-bit version. 3418 if (SplatBits == 0) 3419 SplatBitSize = 32; 3420 3421 switch (SplatBitSize) { 3422 case 8: 3423 if (type != VMOVModImm) 3424 return SDValue(); 3425 // Any 1-byte value is OK. Op=0, Cmode=1110. 3426 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 3427 OpCmode = 0xe; 3428 Imm = SplatBits; 3429 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 3430 break; 3431 3432 case 16: 3433 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 3434 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 3435 if ((SplatBits & ~0xff) == 0) { 3436 // Value = 0x00nn: Op=x, Cmode=100x. 3437 OpCmode = 0x8; 3438 Imm = SplatBits; 3439 break; 3440 } 3441 if ((SplatBits & ~0xff00) == 0) { 3442 // Value = 0xnn00: Op=x, Cmode=101x.
3443 OpCmode = 0xa; 3444 Imm = SplatBits >> 8; 3445 break; 3446 } 3447 return SDValue(); 3448 3449 case 32: 3450 // NEON's 32-bit VMOV supports splat values where: 3451 // * only one byte is nonzero, or 3452 // * the least significant byte is 0xff and the second byte is nonzero, or 3453 // * the least significant 2 bytes are 0xff and the third is nonzero. 3454 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 3455 if ((SplatBits & ~0xff) == 0) { 3456 // Value = 0x000000nn: Op=x, Cmode=000x. 3457 OpCmode = 0; 3458 Imm = SplatBits; 3459 break; 3460 } 3461 if ((SplatBits & ~0xff00) == 0) { 3462 // Value = 0x0000nn00: Op=x, Cmode=001x. 3463 OpCmode = 0x2; 3464 Imm = SplatBits >> 8; 3465 break; 3466 } 3467 if ((SplatBits & ~0xff0000) == 0) { 3468 // Value = 0x00nn0000: Op=x, Cmode=010x. 3469 OpCmode = 0x4; 3470 Imm = SplatBits >> 16; 3471 break; 3472 } 3473 if ((SplatBits & ~0xff000000) == 0) { 3474 // Value = 0xnn000000: Op=x, Cmode=011x. 3475 OpCmode = 0x6; 3476 Imm = SplatBits >> 24; 3477 break; 3478 } 3479 3480 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 3481 if (type == OtherModImm) return SDValue(); 3482 3483 if ((SplatBits & ~0xffff) == 0 && 3484 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 3485 // Value = 0x0000nnff: Op=x, Cmode=1100. 3486 OpCmode = 0xc; 3487 Imm = SplatBits >> 8; 3488 SplatBits |= 0xff; 3489 break; 3490 } 3491 3492 if ((SplatBits & ~0xffffff) == 0 && 3493 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 3494 // Value = 0x00nnffff: Op=x, Cmode=1101. 3495 OpCmode = 0xd; 3496 Imm = SplatBits >> 16; 3497 SplatBits |= 0xffff; 3498 break; 3499 } 3500 3501 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 3502 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 3503 // VMOV.I32. A (very) minor optimization would be to replicate the value 3504 // and fall through here to test for a valid 64-bit splat. But, then the 3505 // caller would also need to check and handle the change in size. 3506 return SDValue(); 3507 3508 case 64: { 3509 if (type != VMOVModImm) 3510 return SDValue(); 3511 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 3512 uint64_t BitMask = 0xff; 3513 uint64_t Val = 0; 3514 unsigned ImmMask = 1; 3515 Imm = 0; 3516 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3517 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3518 Val |= BitMask; 3519 Imm |= ImmMask; 3520 } else if ((SplatBits & BitMask) != 0) { 3521 return SDValue(); 3522 } 3523 BitMask <<= 8; 3524 ImmMask <<= 1; 3525 } 3526 // Op=1, Cmode=1110. 3527 OpCmode = 0x1e; 3528 SplatBits = Val; 3529 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3530 break; 3531 } 3532 3533 default: 3534 llvm_unreachable("unexpected size for isNEONModifiedImm"); 3535 return SDValue(); 3536 } 3537 3538 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 3539 return DAG.getTargetConstant(EncodedVal, MVT::i32); 3540} 3541 3542static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT, 3543 bool &ReverseVEXT, unsigned &Imm) { 3544 unsigned NumElts = VT.getVectorNumElements(); 3545 ReverseVEXT = false; 3546 3547 // Assume that the first shuffle index is not UNDEF. Fail if it is. 3548 if (M[0] < 0) 3549 return false; 3550 3551 Imm = M[0]; 3552 3553 // If this is a VEXT shuffle, the immediate value is the index of the first 3554 // element. The other shuffle indices must be the successive elements after 3555 // the first one. 3556 unsigned ExpectedElt = Imm; 3557 for (unsigned i = 1; i < NumElts; ++i) { 3558 // Increment the expected index. 
If it wraps around, it may still be 3559 // a VEXT but the source vectors must be swapped. 3560 ExpectedElt += 1; 3561 if (ExpectedElt == NumElts * 2) { 3562 ExpectedElt = 0; 3563 ReverseVEXT = true; 3564 } 3565 3566 if (M[i] < 0) continue; // ignore UNDEF indices 3567 if (ExpectedElt != static_cast<unsigned>(M[i])) 3568 return false; 3569 } 3570 3571 // Adjust the index value if the source operands will be swapped. 3572 if (ReverseVEXT) 3573 Imm -= NumElts; 3574 3575 return true; 3576} 3577 3578/// isVREVMask - Check if a vector shuffle corresponds to a VREV 3579/// instruction with the specified blocksize. (The order of the elements 3580/// within each block of the vector is reversed.) 3581static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT, 3582 unsigned BlockSize) { 3583 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 3584 "Only possible block sizes for VREV are: 16, 32, 64"); 3585 3586 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3587 if (EltSz == 64) 3588 return false; 3589 3590 unsigned NumElts = VT.getVectorNumElements(); 3591 unsigned BlockElts = M[0] + 1; 3592 // If the first shuffle index is UNDEF, be optimistic. 3593 if (M[0] < 0) 3594 BlockElts = BlockSize / EltSz; 3595 3596 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 3597 return false; 3598 3599 for (unsigned i = 0; i < NumElts; ++i) { 3600 if (M[i] < 0) continue; // ignore UNDEF indices 3601 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 3602 return false; 3603 } 3604 3605 return true; 3606} 3607 3608static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) { 3609 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 3610 // range, then 0 is placed into the resulting vector. So pretty much any mask 3611 // of 8 elements can work here. 3612 return VT == MVT::v8i8 && M.size() == 8; 3613} 3614 3615static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT, 3616 unsigned &WhichResult) { 3617 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3618 if (EltSz == 64) 3619 return false; 3620 3621 unsigned NumElts = VT.getVectorNumElements(); 3622 WhichResult = (M[0] == 0 ? 0 : 1); 3623 for (unsigned i = 0; i < NumElts; i += 2) { 3624 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3625 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 3626 return false; 3627 } 3628 return true; 3629} 3630 3631/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 3632/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3633/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 3634static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3635 unsigned &WhichResult) { 3636 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3637 if (EltSz == 64) 3638 return false; 3639 3640 unsigned NumElts = VT.getVectorNumElements(); 3641 WhichResult = (M[0] == 0 ? 0 : 1); 3642 for (unsigned i = 0; i < NumElts; i += 2) { 3643 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 3644 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 3645 return false; 3646 } 3647 return true; 3648} 3649 3650static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT, 3651 unsigned &WhichResult) { 3652 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3653 if (EltSz == 64) 3654 return false; 3655 3656 unsigned NumElts = VT.getVectorNumElements(); 3657 WhichResult = (M[0] == 0 ? 
0 : 1); 3658 for (unsigned i = 0; i != NumElts; ++i) { 3659 if (M[i] < 0) continue; // ignore UNDEF indices 3660 if ((unsigned) M[i] != 2 * i + WhichResult) 3661 return false; 3662 } 3663 3664 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3665 if (VT.is64BitVector() && EltSz == 32) 3666 return false; 3667 3668 return true; 3669} 3670 3671/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 3672/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3673/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>. 3674static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3675 unsigned &WhichResult) { 3676 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3677 if (EltSz == 64) 3678 return false; 3679 3680 unsigned Half = VT.getVectorNumElements() / 2; 3681 WhichResult = (M[0] == 0 ? 0 : 1); 3682 for (unsigned j = 0; j != 2; ++j) { 3683 unsigned Idx = WhichResult; 3684 for (unsigned i = 0; i != Half; ++i) { 3685 int MIdx = M[i + j * Half]; 3686 if (MIdx >= 0 && (unsigned) MIdx != Idx) 3687 return false; 3688 Idx += 2; 3689 } 3690 } 3691 3692 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3693 if (VT.is64BitVector() && EltSz == 32) 3694 return false; 3695 3696 return true; 3697} 3698 3699static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT, 3700 unsigned &WhichResult) { 3701 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3702 if (EltSz == 64) 3703 return false; 3704 3705 unsigned NumElts = VT.getVectorNumElements(); 3706 WhichResult = (M[0] == 0 ? 0 : 1); 3707 unsigned Idx = WhichResult * NumElts / 2; 3708 for (unsigned i = 0; i != NumElts; i += 2) { 3709 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3710 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 3711 return false; 3712 Idx += 1; 3713 } 3714 3715 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3716 if (VT.is64BitVector() && EltSz == 32) 3717 return false; 3718 3719 return true; 3720} 3721 3722/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 3723/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 3724/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 3725static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT, 3726 unsigned &WhichResult) { 3727 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3728 if (EltSz == 64) 3729 return false; 3730 3731 unsigned NumElts = VT.getVectorNumElements(); 3732 WhichResult = (M[0] == 0 ? 0 : 1); 3733 unsigned Idx = WhichResult * NumElts / 2; 3734 for (unsigned i = 0; i != NumElts; i += 2) { 3735 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 3736 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 3737 return false; 3738 Idx += 1; 3739 } 3740 3741 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 3742 if (VT.is64BitVector() && EltSz == 32) 3743 return false; 3744 3745 return true; 3746} 3747 3748// If N is an integer constant that can be moved into a register in one 3749// instruction, return an SDValue of such a constant (will become a MOV 3750// instruction). Otherwise return null.
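// On Thumb1 this means an 8-bit immediate or the complement of one; in ARM
// mode it means any so_imm (an 8-bit value rotated right by an even amount)
// or the complement of one, which can be materialized with MVN.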
3751static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 3752 const ARMSubtarget *ST, DebugLoc dl) { 3753 uint64_t Val; 3754 if (!isa<ConstantSDNode>(N)) 3755 return SDValue(); 3756 Val = cast<ConstantSDNode>(N)->getZExtValue(); 3757 3758 if (ST->isThumb1Only()) { 3759 if (Val <= 255 || ~Val <= 255) 3760 return DAG.getConstant(Val, MVT::i32); 3761 } else { 3762 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 3763 return DAG.getConstant(Val, MVT::i32); 3764 } 3765 return SDValue(); 3766} 3767 3768// If this is a case we can't handle, return null and let the default 3769// expansion code take care of it. 3770SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3771 const ARMSubtarget *ST) const { 3772 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3773 DebugLoc dl = Op.getDebugLoc(); 3774 EVT VT = Op.getValueType(); 3775 3776 APInt SplatBits, SplatUndef; 3777 unsigned SplatBitSize; 3778 bool HasAnyUndefs; 3779 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3780 if (SplatBitSize <= 64) { 3781 // Check if an immediate VMOV works. 3782 EVT VmovVT; 3783 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 3784 SplatUndef.getZExtValue(), SplatBitSize, 3785 DAG, VmovVT, VT.is128BitVector(), 3786 VMOVModImm); 3787 if (Val.getNode()) { 3788 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 3789 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3790 } 3791 3792 // Try an immediate VMVN. 3793 uint64_t NegatedImm = (SplatBits.getZExtValue() ^ 3794 ((1LL << SplatBitSize) - 1)); 3795 Val = isNEONModifiedImm(NegatedImm, 3796 SplatUndef.getZExtValue(), SplatBitSize, 3797 DAG, VmovVT, VT.is128BitVector(), 3798 VMVNModImm); 3799 if (Val.getNode()) { 3800 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 3801 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3802 } 3803 } 3804 } 3805 3806 // Scan through the operands to see if only one value is used. 3807 unsigned NumElts = VT.getVectorNumElements(); 3808 bool isOnlyLowElement = true; 3809 bool usesOnlyOneValue = true; 3810 bool isConstant = true; 3811 SDValue Value; 3812 for (unsigned i = 0; i < NumElts; ++i) { 3813 SDValue V = Op.getOperand(i); 3814 if (V.getOpcode() == ISD::UNDEF) 3815 continue; 3816 if (i > 0) 3817 isOnlyLowElement = false; 3818 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3819 isConstant = false; 3820 3821 if (!Value.getNode()) 3822 Value = V; 3823 else if (V != Value) 3824 usesOnlyOneValue = false; 3825 } 3826 3827 if (!Value.getNode()) 3828 return DAG.getUNDEF(VT); 3829 3830 if (isOnlyLowElement) 3831 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 3832 3833 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3834 3835 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 3836 // i32 and try again. 
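// For example, a v4f32 splat of 1.0f is retried below as a v4i32 splat of
// 0x3f800000; that is not a valid NEON modified immediate, but it is an ARM
// so_imm (0xfe rotated right by 10), so it ends up as a single MOV feeding
// a VDUP.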
3837 if (usesOnlyOneValue && EltSize <= 32) { 3838 if (!isConstant) 3839 return DAG.getNode(ARMISD::VDUP, dl, VT, Value); 3840 if (VT.getVectorElementType().isFloatingPoint()) { 3841 SmallVector<SDValue, 8> Ops; 3842 for (unsigned i = 0; i < NumElts; ++i) 3843 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 3844 Op.getOperand(i))); 3845 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 3846 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 3847 Val = LowerBUILD_VECTOR(Val, DAG, ST); 3848 if (Val.getNode()) 3849 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3850 } 3851 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 3852 if (Val.getNode()) 3853 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 3854 } 3855 3856 // If all elements are constants and the case above didn't get hit, fall back 3857 // to the default expansion, which will generate a load from the constant 3858 // pool. 3859 if (isConstant) 3860 return SDValue(); 3861 3862 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 3863 if (NumElts >= 4) { 3864 SDValue shuffle = ReconstructShuffle(Op, DAG); 3865 if (shuffle != SDValue()) 3866 return shuffle; 3867 } 3868 3869 // Vectors with 32- or 64-bit elements can be built by directly assigning 3870 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 3871 // will be legalized. 3872 if (EltSize >= 32) { 3873 // Do the expansion with floating-point types, since that is what the VFP 3874 // registers are defined to use, and since i64 is not legal. 3875 EVT EltVT = EVT::getFloatingPointVT(EltSize); 3876 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 3877 SmallVector<SDValue, 8> Ops; 3878 for (unsigned i = 0; i < NumElts; ++i) 3879 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 3880 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 3881 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 3882 } 3883 3884 return SDValue(); 3885} 3886 3887// Gather data to see if the operation can be modelled as a 3888// shuffle in combination with VEXTs. 3889SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 3890 SelectionDAG &DAG) const { 3891 DebugLoc dl = Op.getDebugLoc(); 3892 EVT VT = Op.getValueType(); 3893 unsigned NumElts = VT.getVectorNumElements(); 3894 3895 SmallVector<SDValue, 2> SourceVecs; 3896 SmallVector<unsigned, 2> MinElts; 3897 SmallVector<unsigned, 2> MaxElts; 3898 3899 for (unsigned i = 0; i < NumElts; ++i) { 3900 SDValue V = Op.getOperand(i); 3901 if (V.getOpcode() == ISD::UNDEF) 3902 continue; 3903 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 3904 // A shuffle can only come from building a vector from various 3905 // elements of other vectors. 3906 return SDValue(); 3907 } 3908 3909 // Record this extraction against the appropriate vector if possible... 3910 SDValue SourceVec = V.getOperand(0); 3911 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 3912 bool FoundSource = false; 3913 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 3914 if (SourceVecs[j] == SourceVec) { 3915 if (MinElts[j] > EltNo) 3916 MinElts[j] = EltNo; 3917 if (MaxElts[j] < EltNo) 3918 MaxElts[j] = EltNo; 3919 FoundSource = true; 3920 break; 3921 } 3922 } 3923 3924 // Or record a new source if not... 
3925 if (!FoundSource) { 3926 SourceVecs.push_back(SourceVec); 3927 MinElts.push_back(EltNo); 3928 MaxElts.push_back(EltNo); 3929 } 3930 } 3931 3932 // Currently only do something sane when at most two source vectors are 3933 // involved. 3934 if (SourceVecs.size() > 2) 3935 return SDValue(); 3936 3937 SDValue ShuffleSrcs[2] = { DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 3938 int VEXTOffsets[2] = {0, 0}; 3939 3940 // This loop extracts the usage patterns of the source vectors 3941 // and prepares appropriate SDValues for a shuffle if possible. 3942 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 3943 if (SourceVecs[i].getValueType() == VT) { 3944 // No VEXT necessary 3945 ShuffleSrcs[i] = SourceVecs[i]; 3946 VEXTOffsets[i] = 0; 3947 continue; 3948 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 3949 // It probably isn't worth padding out a smaller vector just to 3950 // break it down again in a shuffle. 3951 return SDValue(); 3952 } 3953 3954 // Since only 64-bit and 128-bit vectors are legal on ARM and 3955 // we've eliminated the other cases... 3956 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 3957 "unexpected vector sizes in ReconstructShuffle"); 3958 3959 if (MaxElts[i] - MinElts[i] >= NumElts) { 3960 // Span too large for a VEXT to cope with 3961 return SDValue(); 3962 } 3963 3964 if (MinElts[i] >= NumElts) { 3965 // The extraction can just take the second half 3966 VEXTOffsets[i] = NumElts; 3967 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3968 SourceVecs[i], 3969 DAG.getIntPtrConstant(NumElts)); 3970 } else if (MaxElts[i] < NumElts) { 3971 // The extraction can just take the first half 3972 VEXTOffsets[i] = 0; 3973 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3974 SourceVecs[i], 3975 DAG.getIntPtrConstant(0)); 3976 } else { 3977 // An actual VEXT is needed 3978 VEXTOffsets[i] = MinElts[i]; 3979 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3980 SourceVecs[i], 3981 DAG.getIntPtrConstant(0)); 3982 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 3983 SourceVecs[i], 3984 DAG.getIntPtrConstant(NumElts)); 3985 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 3986 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 3987 } 3988 } 3989 3990 SmallVector<int, 8> Mask; 3991 3992 for (unsigned i = 0; i < NumElts; ++i) { 3993 SDValue Entry = Op.getOperand(i); 3994 if (Entry.getOpcode() == ISD::UNDEF) { 3995 Mask.push_back(-1); 3996 continue; 3997 } 3998 3999 SDValue ExtractVec = Entry.getOperand(0); 4000 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4001 .getOperand(1))->getSExtValue(); 4002 if (ExtractVec == SourceVecs[0]) { 4003 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4004 } else { 4005 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4006 } 4007 } 4008 4009 // Final check before we try to produce nonsense... 4010 if (isShuffleMaskLegal(Mask, VT)) 4011 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4012 &Mask[0]); 4013 4014 return SDValue(); 4015} 4016 4017/// isShuffleMaskLegal - Targets can use this to indicate that they only 4018/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4019/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4020/// are assumed to be legal.
4021bool 4022ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4023 EVT VT) const { 4024 if (VT.getVectorNumElements() == 4 && 4025 (VT.is128BitVector() || VT.is64BitVector())) { 4026 unsigned PFIndexes[4]; 4027 for (unsigned i = 0; i != 4; ++i) { 4028 if (M[i] < 0) 4029 PFIndexes[i] = 8; 4030 else 4031 PFIndexes[i] = M[i]; 4032 } 4033 4034 // Compute the index in the perfect shuffle table. 4035 unsigned PFTableIndex = 4036 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4037 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4038 unsigned Cost = (PFEntry >> 30); 4039 4040 if (Cost <= 4) 4041 return true; 4042 } 4043 4044 bool ReverseVEXT; 4045 unsigned Imm, WhichResult; 4046 4047 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4048 return (EltSize >= 32 || 4049 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4050 isVREVMask(M, VT, 64) || 4051 isVREVMask(M, VT, 32) || 4052 isVREVMask(M, VT, 16) || 4053 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4054 isVTBLMask(M, VT) || 4055 isVTRNMask(M, VT, WhichResult) || 4056 isVUZPMask(M, VT, WhichResult) || 4057 isVZIPMask(M, VT, WhichResult) || 4058 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4059 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4060 isVZIP_v_undef_Mask(M, VT, WhichResult)); 4061} 4062 4063/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4064/// the specified operations to build the shuffle. 4065static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4066 SDValue RHS, SelectionDAG &DAG, 4067 DebugLoc dl) { 4068 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4069 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4070 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4071 4072 enum { 4073 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4074 OP_VREV, 4075 OP_VDUP0, 4076 OP_VDUP1, 4077 OP_VDUP2, 4078 OP_VDUP3, 4079 OP_VEXT1, 4080 OP_VEXT2, 4081 OP_VEXT3, 4082 OP_VUZPL, // VUZP, left result 4083 OP_VUZPR, // VUZP, right result 4084 OP_VZIPL, // VZIP, left result 4085 OP_VZIPR, // VZIP, right result 4086 OP_VTRNL, // VTRN, left result 4087 OP_VTRNR // VTRN, right result 4088 }; 4089 4090 if (OpNum == OP_COPY) { 4091 if (LHSID == (1*9+2)*9+3) return LHS; 4092 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4093 return RHS; 4094 } 4095 4096 SDValue OpLHS, OpRHS; 4097 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4098 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4099 EVT VT = OpLHS.getValueType(); 4100 4101 switch (OpNum) { 4102 default: llvm_unreachable("Unknown shuffle opcode!"); 4103 case OP_VREV: 4104 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4105 case OP_VDUP0: 4106 case OP_VDUP1: 4107 case OP_VDUP2: 4108 case OP_VDUP3: 4109 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4110 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4111 case OP_VEXT1: 4112 case OP_VEXT2: 4113 case OP_VEXT3: 4114 return DAG.getNode(ARMISD::VEXT, dl, VT, 4115 OpLHS, OpRHS, 4116 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4117 case OP_VUZPL: 4118 case OP_VUZPR: 4119 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4120 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4121 case OP_VZIPL: 4122 case OP_VZIPR: 4123 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4124 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4125 case OP_VTRNL: 4126 case OP_VTRNR: 4127 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4128 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4129 } 
4130} 4131 4132static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4133 SmallVectorImpl<int> &ShuffleMask, 4134 SelectionDAG &DAG) { 4135 // Check to see if we can use the VTBL instruction. 4136 SDValue V1 = Op.getOperand(0); 4137 SDValue V2 = Op.getOperand(1); 4138 DebugLoc DL = Op.getDebugLoc(); 4139 4140 SmallVector<SDValue, 8> VTBLMask; 4141 for (SmallVectorImpl<int>::iterator 4142 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4143 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4144 4145 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4146 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4147 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4148 &VTBLMask[0], 8)); 4149 4150 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4151 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4152 &VTBLMask[0], 8)); 4153} 4154 4155static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4156 SDValue V1 = Op.getOperand(0); 4157 SDValue V2 = Op.getOperand(1); 4158 DebugLoc dl = Op.getDebugLoc(); 4159 EVT VT = Op.getValueType(); 4160 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4161 SmallVector<int, 8> ShuffleMask; 4162 4163 // Convert shuffles that are directly supported on NEON to target-specific 4164 // DAG nodes, instead of keeping them as shuffles and matching them again 4165 // during code selection. This is more efficient and avoids the possibility 4166 // of inconsistencies between legalization and selection. 4167 // FIXME: floating-point vectors should be canonicalized to integer vectors 4168 // of the same size so that they get CSEd properly. 4169 SVN->getMask(ShuffleMask); 4170 4171 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4172 if (EltSize <= 32) { 4173 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4174 int Lane = SVN->getSplatIndex(); 4175 // If this is an undef splat, generate it via "just" vdup, if possible. 4176 if (Lane == -1) Lane = 0; 4177 4178 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4179 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 4180 } 4181 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 4182 DAG.getConstant(Lane, MVT::i32)); 4183 } 4184 4185 bool ReverseVEXT; 4186 unsigned Imm; 4187 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 4188 if (ReverseVEXT) 4189 std::swap(V1, V2); 4190 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 4191 DAG.getConstant(Imm, MVT::i32)); 4192 } 4193 4194 if (isVREVMask(ShuffleMask, VT, 64)) 4195 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 4196 if (isVREVMask(ShuffleMask, VT, 32)) 4197 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 4198 if (isVREVMask(ShuffleMask, VT, 16)) 4199 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 4200 4201 // Check for Neon shuffles that modify both input vectors in place. 4202 // If both results are used, i.e., if there are two shuffles with the same 4203 // source operands and with masks corresponding to both results of one of 4204 // these operations, DAG memoization will ensure that a single node is 4205 // used for both shuffles.
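// For example, masks <0,4,2,6> and <1,5,3,7> over the same two v4i32
// operands are the two results of a single VTRN.32, so both shuffles lower
// to the same two-result VTRN node and only one instruction is emitted.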
4206 unsigned WhichResult; 4207 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 4208 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4209 V1, V2).getValue(WhichResult); 4210 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 4211 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4212 V1, V2).getValue(WhichResult); 4213 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 4214 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4215 V1, V2).getValue(WhichResult); 4216 4217 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4218 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4219 V1, V1).getValue(WhichResult); 4220 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4221 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4222 V1, V1).getValue(WhichResult); 4223 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 4224 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4225 V1, V1).getValue(WhichResult); 4226 } 4227 4228 // If the shuffle is not directly supported and it has 4 elements, use 4229 // the PerfectShuffle-generated table to synthesize it from other shuffles. 4230 unsigned NumElts = VT.getVectorNumElements(); 4231 if (NumElts == 4) { 4232 unsigned PFIndexes[4]; 4233 for (unsigned i = 0; i != 4; ++i) { 4234 if (ShuffleMask[i] < 0) 4235 PFIndexes[i] = 8; 4236 else 4237 PFIndexes[i] = ShuffleMask[i]; 4238 } 4239 4240 // Compute the index in the perfect shuffle table. 4241 unsigned PFTableIndex = 4242 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4243 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4244 unsigned Cost = (PFEntry >> 30); 4245 4246 if (Cost <= 4) 4247 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4248 } 4249 4250 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 4251 if (EltSize >= 32) { 4252 // Do the expansion with floating-point types, since that is what the VFP 4253 // registers are defined to use, and since i64 is not legal. 4254 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4255 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4256 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 4257 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 4258 SmallVector<SDValue, 8> Ops; 4259 for (unsigned i = 0; i < NumElts; ++i) { 4260 if (ShuffleMask[i] < 0) 4261 Ops.push_back(DAG.getUNDEF(EltVT)); 4262 else 4263 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 4264 ShuffleMask[i] < (int)NumElts ? V1 : V2, 4265 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 4266 MVT::i32))); 4267 } 4268 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4269 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4270 } 4271 4272 if (VT == MVT::v8i8) { 4273 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 4274 if (NewOp.getNode()) 4275 return NewOp; 4276 } 4277 4278 return SDValue(); 4279} 4280 4281static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 4282 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
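// A non-constant lane index returns SDValue() below so the generic
// legalizer expands the extract, typically via a store to a stack temporary
// and a scalar reload.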
4283 SDValue Lane = Op.getOperand(1); 4284 if (!isa<ConstantSDNode>(Lane)) 4285 return SDValue(); 4286 4287 SDValue Vec = Op.getOperand(0); 4288 if (Op.getValueType() == MVT::i32 && 4289 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 4290 DebugLoc dl = Op.getDebugLoc(); 4291 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 4292 } 4293 4294 return Op; 4295} 4296 4297static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 4298 // The only time a CONCAT_VECTORS operation can have legal types is when 4299 // two 64-bit vectors are concatenated to a 128-bit vector. 4300 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 4301 "unexpected CONCAT_VECTORS"); 4302 DebugLoc dl = Op.getDebugLoc(); 4303 SDValue Val = DAG.getUNDEF(MVT::v2f64); 4304 SDValue Op0 = Op.getOperand(0); 4305 SDValue Op1 = Op.getOperand(1); 4306 if (Op0.getOpcode() != ISD::UNDEF) 4307 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4308 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 4309 DAG.getIntPtrConstant(0)); 4310 if (Op1.getOpcode() != ISD::UNDEF) 4311 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 4312 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 4313 DAG.getIntPtrConstant(1)); 4314 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 4315} 4316 4317/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 4318/// element has been zero/sign-extended, depending on the isSigned parameter, 4319/// from an integer type half its size. 4320static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 4321 bool isSigned) { 4322 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 4323 EVT VT = N->getValueType(0); 4324 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 4325 SDNode *BVN = N->getOperand(0).getNode(); 4326 if (BVN->getValueType(0) != MVT::v4i32 || 4327 BVN->getOpcode() != ISD::BUILD_VECTOR) 4328 return false; 4329 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4330 unsigned HiElt = 1 - LoElt; 4331 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 4332 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 4333 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 4334 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 4335 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 4336 return false; 4337 if (isSigned) { 4338 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 4339 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 4340 return true; 4341 } else { 4342 if (Hi0->isNullValue() && Hi1->isNullValue()) 4343 return true; 4344 } 4345 return false; 4346 } 4347 4348 if (N->getOpcode() != ISD::BUILD_VECTOR) 4349 return false; 4350 4351 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 4352 SDNode *Elt = N->getOperand(i).getNode(); 4353 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 4354 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4355 unsigned HalfSize = EltSize / 2; 4356 if (isSigned) { 4357 int64_t SExtVal = C->getSExtValue(); 4358 if ((SExtVal >> HalfSize) != (SExtVal >> EltSize)) 4359 return false; 4360 } else { 4361 if ((C->getZExtValue() >> HalfSize) != 0) 4362 return false; 4363 } 4364 continue; 4365 } 4366 return false; 4367 } 4368 4369 return true; 4370} 4371 4372/// isSignExtended - Check if a node is a vector value that is sign-extended 4373/// or a constant BUILD_VECTOR with sign-extended elements. 
4374static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 4375 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 4376 return true; 4377 if (isExtendedBUILD_VECTOR(N, DAG, true)) 4378 return true; 4379 return false; 4380} 4381 4382/// isZeroExtended - Check if a node is a vector value that is zero-extended 4383/// or a constant BUILD_VECTOR with zero-extended elements. 4384static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 4385 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 4386 return true; 4387 if (isExtendedBUILD_VECTOR(N, DAG, false)) 4388 return true; 4389 return false; 4390} 4391 4392/// SkipExtension - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending 4393/// load, or BUILD_VECTOR with extended elements, return the unextended value. 4394static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) { 4395 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 4396 return N->getOperand(0); 4397 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 4398 return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(), 4399 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 4400 LD->isNonTemporal(), LD->getAlignment()); 4401 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 4402 // have been legalized as a BITCAST from v4i32. 4403 if (N->getOpcode() == ISD::BITCAST) { 4404 SDNode *BVN = N->getOperand(0).getNode(); 4405 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 4406 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 4407 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 4408 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 4409 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 4410 } 4411 // Construct a new BUILD_VECTOR with elements truncated to half the size. 4412 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 4413 EVT VT = N->getValueType(0); 4414 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 4415 unsigned NumElts = VT.getVectorNumElements(); 4416 MVT TruncVT = MVT::getIntegerVT(EltSize); 4417 SmallVector<SDValue, 8> Ops; 4418 for (unsigned i = 0; i != NumElts; ++i) { 4419 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 4420 const APInt &CInt = C->getAPIntValue(); 4421 Ops.push_back(DAG.getConstant(CInt.trunc(EltSize), TruncVT)); 4422 } 4423 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 4424 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 4425} 4426 4427static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 4428 unsigned Opcode = N->getOpcode(); 4429 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4430 SDNode *N0 = N->getOperand(0).getNode(); 4431 SDNode *N1 = N->getOperand(1).getNode(); 4432 return N0->hasOneUse() && N1->hasOneUse() && 4433 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 4434 } 4435 return false; 4436} 4437 4438static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 4439 unsigned Opcode = N->getOpcode(); 4440 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 4441 SDNode *N0 = N->getOperand(0).getNode(); 4442 SDNode *N1 = N->getOperand(1).getNode(); 4443 return N0->hasOneUse() && N1->hasOneUse() && 4444 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 4445 } 4446 return false; 4447} 4448 4449static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 4450 // Multiplications are only custom-lowered for 128-bit vectors so that 4451 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
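// For example, (v4i32 mul (sext v4i16 %a), (sext v4i16 %b)) is rewritten
// here as a VMULLs node, i.e., a single vmull.s16 of the two d-register
// operands, instead of two vmovl sign extensions followed by a q-register
// vmul.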
4452 EVT VT = Op.getValueType(); 4453 assert(VT.is128BitVector() && "unexpected type for custom-lowering ISD::MUL"); 4454 SDNode *N0 = Op.getOperand(0).getNode(); 4455 SDNode *N1 = Op.getOperand(1).getNode(); 4456 unsigned NewOpc = 0; 4457 bool isMLA = false; 4458 bool isN0SExt = isSignExtended(N0, DAG); 4459 bool isN1SExt = isSignExtended(N1, DAG); 4460 if (isN0SExt && isN1SExt) 4461 NewOpc = ARMISD::VMULLs; 4462 else { 4463 bool isN0ZExt = isZeroExtended(N0, DAG); 4464 bool isN1ZExt = isZeroExtended(N1, DAG); 4465 if (isN0ZExt && isN1ZExt) 4466 NewOpc = ARMISD::VMULLu; 4467 else if (isN1SExt || isN1ZExt) { 4468 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 4469 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 4470 if (isN1SExt && isAddSubSExt(N0, DAG)) { 4471 NewOpc = ARMISD::VMULLs; 4472 isMLA = true; 4473 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 4474 NewOpc = ARMISD::VMULLu; 4475 isMLA = true; 4476 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 4477 std::swap(N0, N1); 4478 NewOpc = ARMISD::VMULLu; 4479 isMLA = true; 4480 } 4481 } 4482 4483 if (!NewOpc) { 4484 if (VT == MVT::v2i64) 4485 // Fall through to expand this. It is not legal. 4486 return SDValue(); 4487 else 4488 // Other vector multiplications are legal. 4489 return Op; 4490 } 4491 } 4492 4493 // Legalize to a VMULL instruction. 4494 DebugLoc DL = Op.getDebugLoc(); 4495 SDValue Op0; 4496 SDValue Op1 = SkipExtension(N1, DAG); 4497 if (!isMLA) { 4498 Op0 = SkipExtension(N0, DAG); 4499 assert(Op0.getValueType().is64BitVector() && 4500 Op1.getValueType().is64BitVector() && 4501 "unexpected types for extended operands to VMULL"); 4502 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 4503 } 4504 4505 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 4506 // isel lowering to take advantage of no-stall back to back vmul + vmla. 4507 // vmull q0, d4, d6 4508 // vmlal q0, d5, d6 4509 // is faster than 4510 // vaddl q0, d4, d5 4511 // vmovl q1, d6 4512 // vmul q0, q0, q1 4513 SDValue N00 = SkipExtension(N0->getOperand(0).getNode(), DAG); 4514 SDValue N01 = SkipExtension(N0->getOperand(1).getNode(), DAG); 4515 EVT Op1VT = Op1.getValueType(); 4516 return DAG.getNode(N0->getOpcode(), DL, VT, 4517 DAG.getNode(NewOpc, DL, VT, 4518 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 4519 DAG.getNode(NewOpc, DL, VT, 4520 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 4521} 4522 4523static SDValue 4524LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 4525 // Convert to float 4526 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 4527 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 4528 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 4529 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 4530 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 4531 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 4532 // Get reciprocal estimate. 4533 // float4 recip = vrecpeq_f32(yf); 4534 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4535 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 4536 // Because char has a smaller range than uchar, we can actually get away 4537 // without any newton steps. This requires that we use a weird bias 4538 // of 0xb000, however (again, this has been exhaustively tested). 
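  // (Adding an integer constant to the raw float bits bumps the value by
  // that many ulps, which is how the bias below compensates for the low
  // reciprocal estimate before the final truncating convert.)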
4539 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 4540 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 4541 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 4542 Y = DAG.getConstant(0xb000, MVT::i32); 4543 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 4544 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 4545 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 4546 // Convert back to short. 4547 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 4548 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 4549 return X; 4550} 4551 4552static SDValue 4553LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 4554 SDValue N2; 4555 // Convert to float. 4556 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 4557 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 4558 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 4559 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 4560 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 4561 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 4562 4563 // Use reciprocal estimate and one refinement step. 4564 // float4 recip = vrecpeq_f32(yf); 4565 // recip *= vrecpsq_f32(yf, recip); 4566 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4567 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 4568 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 4569 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 4570 N1, N2); 4571 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 4572 // Because short has a smaller range than ushort, we can actually get away 4573 // with only a single newton step. This requires that we use a weird bias 4574 // of 89, however (again, this has been exhaustively tested). 4575 // float4 result = as_float4(as_int4(xf*recip) + 89); 4576 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 4577 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 4578 N1 = DAG.getConstant(89, MVT::i32); 4579 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 4580 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 4581 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 4582 // Convert back to integer and return. 
4583 // return vmovn_s32(vcvt_s32_f32(result));
4584 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4585 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4586 return N0;
4587}
4588
4589static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
4590 EVT VT = Op.getValueType();
4591 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
4592 "unexpected type for custom-lowering ISD::SDIV");
4593
4594 DebugLoc dl = Op.getDebugLoc();
4595 SDValue N0 = Op.getOperand(0);
4596 SDValue N1 = Op.getOperand(1);
4597 SDValue N2, N3;
4598
4599 if (VT == MVT::v8i8) {
4600 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
4601 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
4602
4603 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4604 DAG.getIntPtrConstant(4));
4605 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4606 DAG.getIntPtrConstant(4));
4607 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4608 DAG.getIntPtrConstant(0));
4609 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4610 DAG.getIntPtrConstant(0));
4611
4612 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
4613 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
4614
4615 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
4616 N0 = LowerCONCAT_VECTORS(N0, DAG);
4617
4618 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
4619 return N0;
4620 }
4621 return LowerSDIV_v4i16(N0, N1, dl, DAG);
4622}
4623
4624static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
4625 EVT VT = Op.getValueType();
4626 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
4627 "unexpected type for custom-lowering ISD::UDIV");
4628
4629 DebugLoc dl = Op.getDebugLoc();
4630 SDValue N0 = Op.getOperand(0);
4631 SDValue N1 = Op.getOperand(1);
4632 SDValue N2, N3;
4633
4634 if (VT == MVT::v8i8) {
4635 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
4636 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
4637
4638 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4639 DAG.getIntPtrConstant(4));
4640 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4641 DAG.getIntPtrConstant(4));
4642 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
4643 DAG.getIntPtrConstant(0));
4644 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
4645 DAG.getIntPtrConstant(0));
4646
4647 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 (zero-extended u8 values
4648 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // fit in the signed range, so the
                                            // signed helper still yields the
                                            // correct unsigned quotient)
4649
4650 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
4651 N0 = LowerCONCAT_VECTORS(N0, DAG);
4652
4653 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
4654 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
4655 N0);
4656 return N0;
4657 }
4658
4659 // v4i16 udiv ... Convert to float.
4660 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
4661 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
4662 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
4663 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
4664 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
4665 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
4666
4667 // Use reciprocal estimate and two refinement steps.
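 // (Each vrecps step is one Newton-Raphson iteration, roughly doubling the
 // number of correct bits in the reciprocal estimate; the unsigned 16-bit
 // range needs two such steps where the signed cases above get by with
 // fewer.)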
4668 // float4 recip = vrecpeq_f32(yf);
4669 // recip *= vrecpsq_f32(yf, recip);
4670 // recip *= vrecpsq_f32(yf, recip);
4671 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4672 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
4673 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4674 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4675 N1, N2);
4676 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4677 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
4678 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
4679 N1, N2);
4680 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
4681 // Simply multiplying by the reciprocal estimate can leave us a few ulps
4682 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
4683 // and that it will never cause us to return an answer too large).
4684 // float4 result = as_float4(as_int4(xf*recip) + 2);
4685 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
4686 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
4687 N1 = DAG.getConstant(2, MVT::i32);
4688 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
4689 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
4690 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
4691 // Convert back to integer and return.
4692 // return vmovn_u32(vcvt_s32_f32(result));
4693 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
4694 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
4695 return N0;
4696}
4697
4698 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4699 switch (Op.getOpcode()) {
4700 default: llvm_unreachable("Don't know how to custom lower this!");
4701 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
4702 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
4703 case ISD::GlobalAddress:
4704 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 4705 LowerGlobalAddressELF(Op, DAG); 4706 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4707 case ISD::SELECT: return LowerSELECT(Op, DAG); 4708 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4709 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 4710 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 4711 case ISD::VASTART: return LowerVASTART(Op, DAG); 4712 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget); 4713 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 4714 case ISD::SINT_TO_FP: 4715 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 4716 case ISD::FP_TO_SINT: 4717 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 4718 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 4719 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4720 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4721 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 4722 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 4723 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 4724 case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); 4725 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 4726 Subtarget); 4727 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 4728 case ISD::SHL: 4729 case ISD::SRL: 4730 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 4731 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 4732 case ISD::SRL_PARTS: 4733 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 4734 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 4735 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 4736 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 4737 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4738 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 4739 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 4740 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4741 case ISD::MUL: return LowerMUL(Op, DAG); 4742 case ISD::SDIV: return LowerSDIV(Op, DAG); 4743 case ISD::UDIV: return LowerUDIV(Op, DAG); 4744 } 4745 return SDValue(); 4746} 4747 4748/// ReplaceNodeResults - Replace the results of node with an illegal result 4749/// type with new values built out of custom code. 
4750void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 4751 SmallVectorImpl<SDValue>&Results, 4752 SelectionDAG &DAG) const { 4753 SDValue Res; 4754 switch (N->getOpcode()) { 4755 default: 4756 llvm_unreachable("Don't know how to custom expand this!"); 4757 break; 4758 case ISD::BITCAST: 4759 Res = ExpandBITCAST(N, DAG); 4760 break; 4761 case ISD::SRL: 4762 case ISD::SRA: 4763 Res = Expand64BitShift(N, DAG, Subtarget); 4764 break; 4765 } 4766 if (Res.getNode()) 4767 Results.push_back(Res); 4768} 4769 4770//===----------------------------------------------------------------------===// 4771// ARM Scheduler Hooks 4772//===----------------------------------------------------------------------===// 4773 4774MachineBasicBlock * 4775ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 4776 MachineBasicBlock *BB, 4777 unsigned Size) const { 4778 unsigned dest = MI->getOperand(0).getReg(); 4779 unsigned ptr = MI->getOperand(1).getReg(); 4780 unsigned oldval = MI->getOperand(2).getReg(); 4781 unsigned newval = MI->getOperand(3).getReg(); 4782 unsigned scratch = BB->getParent()->getRegInfo() 4783 .createVirtualRegister(ARM::GPRRegisterClass); 4784 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4785 DebugLoc dl = MI->getDebugLoc(); 4786 bool isThumb2 = Subtarget->isThumb2(); 4787 4788 unsigned ldrOpc, strOpc; 4789 switch (Size) { 4790 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 4791 case 1: 4792 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 4793 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 4794 break; 4795 case 2: 4796 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 4797 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 4798 break; 4799 case 4: 4800 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 4801 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 4802 break; 4803 } 4804 4805 MachineFunction *MF = BB->getParent(); 4806 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4807 MachineFunction::iterator It = BB; 4808 ++It; // insert the new blocks after the current block 4809 4810 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4811 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 4812 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 4813 MF->insert(It, loop1MBB); 4814 MF->insert(It, loop2MBB); 4815 MF->insert(It, exitMBB); 4816 4817 // Transfer the remainder of BB and its successor edges to exitMBB. 4818 exitMBB->splice(exitMBB->begin(), BB, 4819 llvm::next(MachineBasicBlock::iterator(MI)), 4820 BB->end()); 4821 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 4822 4823 // thisMBB: 4824 // ... 4825 // fallthrough --> loop1MBB 4826 BB->addSuccessor(loop1MBB); 4827 4828 // loop1MBB: 4829 // ldrex dest, [ptr] 4830 // cmp dest, oldval 4831 // bne exitMBB 4832 BB = loop1MBB; 4833 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr)); 4834 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 4835 .addReg(dest).addReg(oldval)); 4836 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 4837 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 4838 BB->addSuccessor(loop2MBB); 4839 BB->addSuccessor(exitMBB); 4840 4841 // loop2MBB: 4842 // strex scratch, newval, [ptr] 4843 // cmp scratch, #0 4844 // bne loop1MBB 4845 BB = loop2MBB; 4846 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval) 4847 .addReg(ptr)); 4848 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? 
ARM::t2CMPri : ARM::CMPri))
4849 .addReg(scratch).addImm(0));
4850 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4851 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4852 BB->addSuccessor(loop1MBB);
4853 BB->addSuccessor(exitMBB);
4854
4855 // exitMBB:
4856 // ...
4857 BB = exitMBB;
4858
4859 MI->eraseFromParent(); // The instruction is gone now.
4860
4861 return BB;
4862}
4863
4864MachineBasicBlock *
4865ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
4866 unsigned Size, unsigned BinOpcode) const {
4867 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
4868 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4869
4870 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4871 MachineFunction *MF = BB->getParent();
4872 MachineFunction::iterator It = BB;
4873 ++It;
4874
4875 unsigned dest = MI->getOperand(0).getReg();
4876 unsigned ptr = MI->getOperand(1).getReg();
4877 unsigned incr = MI->getOperand(2).getReg();
4878 DebugLoc dl = MI->getDebugLoc();
4879
4880 bool isThumb2 = Subtarget->isThumb2();
4881 unsigned ldrOpc, strOpc;
4882 switch (Size) {
4883 default: llvm_unreachable("unsupported size for AtomicBinary!");
4884 case 1:
4885 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
4886 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
4887 break;
4888 case 2:
4889 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
4890 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
4891 break;
4892 case 4:
4893 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
4894 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
4895 break;
4896 }
4897
4898 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4899 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
4900 MF->insert(It, loopMBB);
4901 MF->insert(It, exitMBB);
4902
4903 // Transfer the remainder of BB and its successor edges to exitMBB.
4904 exitMBB->splice(exitMBB->begin(), BB,
4905 llvm::next(MachineBasicBlock::iterator(MI)),
4906 BB->end());
4907 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4908
4909 MachineRegisterInfo &RegInfo = MF->getRegInfo();
4910 unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4911 unsigned scratch2 = (!BinOpcode) ? incr :
4912 RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
4913
4914 // thisMBB:
4915 // ...
4916 // fallthrough --> loopMBB
4917 BB->addSuccessor(loopMBB);
4918
4919 // loopMBB:
4920 // ldrex dest, ptr
4921 // <binop> scratch2, dest, incr
4922 // strex scratch, scratch2, ptr
4923 // cmp scratch, #0
4924 // bne- loopMBB
4925 // fallthrough --> exitMBB
4926 BB = loopMBB;
4927 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
4928 if (BinOpcode) {
4929 // operand order needs to go the other way for NAND
4930 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
4931 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4932 addReg(incr).addReg(dest)).addReg(0);
4933 else
4934 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
4935 addReg(dest).addReg(incr)).addReg(0);
4936 }
4937
4938 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
4939 .addReg(ptr));
4940 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
4941 .addReg(scratch).addImm(0));
4942 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
4943 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
4944
4945 BB->addSuccessor(loopMBB);
4946 BB->addSuccessor(exitMBB);
4947
4948 // exitMBB:
4949 // ...
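 // Control only reaches exitMBB once the strex has succeeded (scratch == 0),
 // i.e. nothing cleared the exclusive monitor between the ldrex and the
 // strex, so the read-modify-write appears atomic to other observers.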
4950 BB = exitMBB; 4951 4952 MI->eraseFromParent(); // The instruction is gone now. 4953 4954 return BB; 4955} 4956 4957static 4958MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 4959 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 4960 E = MBB->succ_end(); I != E; ++I) 4961 if (*I != Succ) 4962 return *I; 4963 llvm_unreachable("Expecting a BB with two successors!"); 4964} 4965 4966MachineBasicBlock * 4967ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4968 MachineBasicBlock *BB) const { 4969 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4970 DebugLoc dl = MI->getDebugLoc(); 4971 bool isThumb2 = Subtarget->isThumb2(); 4972 switch (MI->getOpcode()) { 4973 default: 4974 MI->dump(); 4975 llvm_unreachable("Unexpected instr type to insert"); 4976 4977 case ARM::ATOMIC_LOAD_ADD_I8: 4978 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4979 case ARM::ATOMIC_LOAD_ADD_I16: 4980 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4981 case ARM::ATOMIC_LOAD_ADD_I32: 4982 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 4983 4984 case ARM::ATOMIC_LOAD_AND_I8: 4985 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4986 case ARM::ATOMIC_LOAD_AND_I16: 4987 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4988 case ARM::ATOMIC_LOAD_AND_I32: 4989 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 4990 4991 case ARM::ATOMIC_LOAD_OR_I8: 4992 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4993 case ARM::ATOMIC_LOAD_OR_I16: 4994 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4995 case ARM::ATOMIC_LOAD_OR_I32: 4996 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 4997 4998 case ARM::ATOMIC_LOAD_XOR_I8: 4999 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5000 case ARM::ATOMIC_LOAD_XOR_I16: 5001 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5002 case ARM::ATOMIC_LOAD_XOR_I32: 5003 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 5004 5005 case ARM::ATOMIC_LOAD_NAND_I8: 5006 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5007 case ARM::ATOMIC_LOAD_NAND_I16: 5008 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5009 case ARM::ATOMIC_LOAD_NAND_I32: 5010 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 5011 5012 case ARM::ATOMIC_LOAD_SUB_I8: 5013 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5014 case ARM::ATOMIC_LOAD_SUB_I16: 5015 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5016 case ARM::ATOMIC_LOAD_SUB_I32: 5017 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 5018 5019 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 5020 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 5021 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 5022 5023 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 5024 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 5025 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 5026 5027 case ARM::tMOVCCr_pseudo: { 5028 // To "insert" a SELECT_CC instruction, we actually have to insert the 5029 // diamond control-flow pattern. 
The incoming instruction knows the
5030 // destination vreg to set, the condition code register to branch on, the
5031 // true/false values to select between, and a branch opcode to use.
5032 const BasicBlock *LLVM_BB = BB->getBasicBlock();
5033 MachineFunction::iterator It = BB;
5034 ++It;
5035
5036 // thisMBB:
5037 // ...
5038 // TrueVal = ...
5039 // cmpTY ccX, r1, r2
5040 // bCC sinkMBB
5041 // fallthrough --> copy0MBB
5042 MachineBasicBlock *thisMBB = BB;
5043 MachineFunction *F = BB->getParent();
5044 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
5045 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
5046 F->insert(It, copy0MBB);
5047 F->insert(It, sinkMBB);
5048
5049 // Transfer the remainder of BB and its successor edges to sinkMBB.
5050 sinkMBB->splice(sinkMBB->begin(), BB,
5051 llvm::next(MachineBasicBlock::iterator(MI)),
5052 BB->end());
5053 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
5054
5055 BB->addSuccessor(copy0MBB);
5056 BB->addSuccessor(sinkMBB);
5057
5058 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
5059 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
5060
5061 // copy0MBB:
5062 // %FalseValue = ...
5063 // # fallthrough to sinkMBB
5064 BB = copy0MBB;
5065
5066 // Update machine-CFG edges
5067 BB->addSuccessor(sinkMBB);
5068
5069 // sinkMBB:
5070 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
5071 // ...
5072 BB = sinkMBB;
5073 BuildMI(*BB, BB->begin(), dl,
5074 TII->get(ARM::PHI), MI->getOperand(0).getReg())
5075 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
5076 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
5077
5078 MI->eraseFromParent(); // The pseudo instruction is gone now.
5079 return BB;
5080 }
5081
5082 case ARM::BCCi64:
5083 case ARM::BCCZi64: {
5084 // If there is an unconditional branch to the other successor, remove it.
5085 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
5086
5087 // Compare both parts that make up the double comparison separately for
5088 // equality.
5089 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
5090
5091 unsigned LHS1 = MI->getOperand(1).getReg();
5092 unsigned LHS2 = MI->getOperand(2).getReg();
5093 if (RHSisZero) {
5094 AddDefaultPred(BuildMI(BB, dl,
5095 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5096 .addReg(LHS1).addImm(0));
5097 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
5098 .addReg(LHS2).addImm(0)
5099 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
5100 } else {
5101 unsigned RHS1 = MI->getOperand(3).getReg();
5102 unsigned RHS2 = MI->getOperand(4).getReg();
5103 AddDefaultPred(BuildMI(BB, dl,
5104 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5105 .addReg(LHS1).addReg(RHS1));
5106 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
5107 .addReg(LHS2).addReg(RHS2)
5108 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
5109 }
5110
5111 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
5112 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
5113 if (MI->getOperand(0).getImm() == ARMCC::NE)
5114 std::swap(destMBB, exitMBB);
5115
5116 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
5117 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
5118 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B))
5119 .addMBB(exitMBB);
5120
5121 MI->eraseFromParent(); // The pseudo instruction is gone now.
5122 return BB;
5123 }
5124 }
5125}
5126
5127//===----------------------------------------------------------------------===//
5128// ARM Optimization Hooks
5129//===----------------------------------------------------------------------===//
5130
5131static
5132SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5133 TargetLowering::DAGCombinerInfo &DCI) {
5134 SelectionDAG &DAG = DCI.DAG;
5135 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5136 EVT VT = N->getValueType(0);
5137 unsigned Opc = N->getOpcode();
5138 bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
5139 SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
5140 SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
5141 ISD::CondCode CC = ISD::SETCC_INVALID;
5142
5143 if (isSlctCC) {
5144 CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
5145 } else {
5146 SDValue CCOp = Slct.getOperand(0);
5147 if (CCOp.getOpcode() == ISD::SETCC)
5148 CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
5149 }
5150
5151 bool DoXform = false;
5152 bool InvCC = false;
5153 assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
5154 "Bad input!");
5155
5156 if (LHS.getOpcode() == ISD::Constant &&
5157 cast<ConstantSDNode>(LHS)->isNullValue()) {
5158 DoXform = true;
5159 } else if (CC != ISD::SETCC_INVALID &&
5160 RHS.getOpcode() == ISD::Constant &&
5161 cast<ConstantSDNode>(RHS)->isNullValue()) {
5162 std::swap(LHS, RHS);
5163 SDValue Op0 = Slct.getOperand(0);
5164 EVT OpVT = isSlctCC ? Op0.getValueType() :
5165 Op0.getOperand(0).getValueType();
5166 bool isInt = OpVT.isInteger();
5167 CC = ISD::getSetCCInverse(CC, isInt);
5168
5169 if (!TLI.isCondCodeLegal(CC, OpVT))
5170 return SDValue(); // Inverse operator isn't legal.
5171
5172 DoXform = true;
5173 InvCC = true;
5174 }
5175
5176 if (DoXform) {
5177 SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
5178 if (isSlctCC)
5179 return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
5180 Slct.getOperand(0), Slct.getOperand(1), CC);
5181 SDValue CCOp = Slct.getOperand(0);
5182 if (InvCC)
5183 CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
5184 CCOp.getOperand(0), CCOp.getOperand(1), CC);
5185 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
5186 CCOp, OtherOp, Result);
5187 }
5188 return SDValue();
5189}
5190
5191/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5192/// operands N0 and N1. This is a helper for PerformADDCombine that is
5193/// called with the default operands, and if that fails, with commuted
5194/// operands.
5195static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5196 TargetLowering::DAGCombinerInfo &DCI) {
5197 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
5198 if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
5199 SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
5200 if (Result.getNode()) return Result;
5201 }
5202 return SDValue();
5203}
5204
5205/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
5206///
5207static SDValue PerformADDCombine(SDNode *N,
5208 TargetLowering::DAGCombinerInfo &DCI) {
5209 SDValue N0 = N->getOperand(0);
5210 SDValue N1 = N->getOperand(1);
5211
5212 // First try with the default operand order.
5213 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI);
5214 if (Result.getNode())
5215 return Result;
5216
5217 // If that didn't work, try again with the operands commuted.
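 // (ADD is commutative, so the select may appear as either operand; SUB is
 // not, which is why PerformSUBCombine below only looks at its second
 // operand.)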
5218 return PerformADDCombineWithOperands(N, N1, N0, DCI);
5219}
5220
5221/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
5222///
5223static SDValue PerformSUBCombine(SDNode *N,
5224 TargetLowering::DAGCombinerInfo &DCI) {
5225 SDValue N0 = N->getOperand(0);
5226 SDValue N1 = N->getOperand(1);
5227
5228 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
5229 if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
5230 SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
5231 if (Result.getNode()) return Result;
5232 }
5233
5234 return SDValue();
5235}
5236
5237/// PerformVMULCombine
5238/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
5239/// special multiplier accumulator forwarding.
5240/// vmul d3, d0, d2
5241/// vmla d3, d1, d2
5242/// is faster than
5243/// vadd d3, d0, d1
5244/// vmul d3, d3, d2
5245static SDValue PerformVMULCombine(SDNode *N,
5246 TargetLowering::DAGCombinerInfo &DCI,
5247 const ARMSubtarget *Subtarget) {
5248 if (!Subtarget->hasVMLxForwarding())
5249 return SDValue();
5250
5251 SelectionDAG &DAG = DCI.DAG;
5252 SDValue N0 = N->getOperand(0);
5253 SDValue N1 = N->getOperand(1);
5254 unsigned Opcode = N0.getOpcode();
5255 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
5256 Opcode != ISD::FADD && Opcode != ISD::FSUB) {
5257 Opcode = N1.getOpcode(); // check the other operand before giving up
5258 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
5259 Opcode != ISD::FADD && Opcode != ISD::FSUB)
5260 return SDValue();
5261 std::swap(N0, N1);
5262 }
5263
5264 EVT VT = N->getValueType(0);
5265 DebugLoc DL = N->getDebugLoc();
5266 SDValue N00 = N0->getOperand(0);
5267 SDValue N01 = N0->getOperand(1);
5268 return DAG.getNode(Opcode, DL, VT,
5269 DAG.getNode(ISD::MUL, DL, VT, N00, N1),
5270 DAG.getNode(ISD::MUL, DL, VT, N01, N1));
5271}
5272
5273static SDValue PerformMULCombine(SDNode *N,
5274 TargetLowering::DAGCombinerInfo &DCI,
5275 const ARMSubtarget *Subtarget) {
5276 SelectionDAG &DAG = DCI.DAG;
5277
5278 if (Subtarget->isThumb1Only())
5279 return SDValue();
5280
5281 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
5282 return SDValue();
5283
5284 EVT VT = N->getValueType(0);
5285 if (VT.is64BitVector() || VT.is128BitVector())
5286 return PerformVMULCombine(N, DCI, Subtarget);
5287 if (VT != MVT::i32)
5288 return SDValue();
5289
5290 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
5291 if (!C)
5292 return SDValue();
5293
5294 uint64_t MulAmt = C->getZExtValue();
5295 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
5296 ShiftAmt = ShiftAmt & (32 - 1);
5297 SDValue V = N->getOperand(0);
5298 DebugLoc DL = N->getDebugLoc();
5299
5300 SDValue Res;
5301 MulAmt >>= ShiftAmt;
5302 if (isPowerOf2_32(MulAmt - 1)) {
5303 // (mul x, 2^N + 1) => (add (shl x, N), x)
5304 Res = DAG.getNode(ISD::ADD, DL, VT,
5305 V, DAG.getNode(ISD::SHL, DL, VT,
5306 V, DAG.getConstant(Log2_32(MulAmt-1),
5307 MVT::i32)));
5308 } else if (isPowerOf2_32(MulAmt + 1)) {
5309 // (mul x, 2^N - 1) => (sub (shl x, N), x)
5310 Res = DAG.getNode(ISD::SUB, DL, VT,
5311 DAG.getNode(ISD::SHL, DL, VT,
5312 V, DAG.getConstant(Log2_32(MulAmt+1),
5313 MVT::i32)),
5314 V);
5315 } else
5316 return SDValue();
5317
5318 if (ShiftAmt != 0)
5319 Res = DAG.getNode(ISD::SHL, DL, VT, Res,
5320 DAG.getConstant(ShiftAmt, MVT::i32));
5321
5322 // Do not add new nodes to DAG combiner worklist.
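 // (The 'false' AddTo argument to CombineTo below is what keeps the
 // replacement nodes off the combiner's worklist.)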
5323 DCI.CombineTo(N, Res, false); 5324 return SDValue(); 5325} 5326 5327static SDValue PerformANDCombine(SDNode *N, 5328 TargetLowering::DAGCombinerInfo &DCI) { 5329 5330 // Attempt to use immediate-form VBIC 5331 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5332 DebugLoc dl = N->getDebugLoc(); 5333 EVT VT = N->getValueType(0); 5334 SelectionDAG &DAG = DCI.DAG; 5335 5336 APInt SplatBits, SplatUndef; 5337 unsigned SplatBitSize; 5338 bool HasAnyUndefs; 5339 if (BVN && 5340 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5341 if (SplatBitSize <= 64) { 5342 EVT VbicVT; 5343 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 5344 SplatUndef.getZExtValue(), SplatBitSize, 5345 DAG, VbicVT, VT.is128BitVector(), 5346 OtherModImm); 5347 if (Val.getNode()) { 5348 SDValue Input = 5349 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 5350 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 5351 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 5352 } 5353 } 5354 } 5355 5356 return SDValue(); 5357} 5358 5359/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 5360static SDValue PerformORCombine(SDNode *N, 5361 TargetLowering::DAGCombinerInfo &DCI, 5362 const ARMSubtarget *Subtarget) { 5363 // Attempt to use immediate-form VORR 5364 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 5365 DebugLoc dl = N->getDebugLoc(); 5366 EVT VT = N->getValueType(0); 5367 SelectionDAG &DAG = DCI.DAG; 5368 5369 APInt SplatBits, SplatUndef; 5370 unsigned SplatBitSize; 5371 bool HasAnyUndefs; 5372 if (BVN && Subtarget->hasNEON() && 5373 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5374 if (SplatBitSize <= 64) { 5375 EVT VorrVT; 5376 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 5377 SplatUndef.getZExtValue(), SplatBitSize, 5378 DAG, VorrVT, VT.is128BitVector(), 5379 OtherModImm); 5380 if (Val.getNode()) { 5381 SDValue Input = 5382 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 5383 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 5384 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 5385 } 5386 } 5387 } 5388 5389 SDValue N0 = N->getOperand(0); 5390 if (N0.getOpcode() != ISD::AND) 5391 return SDValue(); 5392 SDValue N1 = N->getOperand(1); 5393 5394 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 5395 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 5396 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 5397 APInt SplatUndef; 5398 unsigned SplatBitSize; 5399 bool HasAnyUndefs; 5400 5401 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 5402 APInt SplatBits0; 5403 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 5404 HasAnyUndefs) && !HasAnyUndefs) { 5405 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 5406 APInt SplatBits1; 5407 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 5408 HasAnyUndefs) && !HasAnyUndefs && 5409 SplatBits0 == ~SplatBits1) { 5410 // Canonicalize the vector type to make instruction selection simpler. 5411 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v4i32 : MVT::v2i32; 5412 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 5413 N0->getOperand(1), N0->getOperand(0), 5414 N1->getOperand(1)); 5415 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 5416 } 5417 } 5418 } 5419 5420 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 5421 // reasonable. 5422 5423 // BFI is only available on V6T2+ 5424 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 5425 return SDValue(); 5426 5427 DebugLoc DL = N->getDebugLoc(); 5428 // 1) or (and A, mask), val => ARMbfi A, val, mask 5429 // iff (val & mask) == val 5430 // 5431 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5432 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 5433 // && mask == ~mask2 5434 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 5435 // && ~mask == mask2 5436 // (i.e., copy a bitfield value into another bitfield of the same width) 5437 5438 if (VT != MVT::i32) 5439 return SDValue(); 5440 5441 SDValue N00 = N0.getOperand(0); 5442 5443 // The value and the mask need to be constants so we can verify this is 5444 // actually a bitfield set. If the mask is 0xffff, we can do better 5445 // via a movt instruction, so don't use BFI in that case. 5446 SDValue MaskOp = N0.getOperand(1); 5447 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 5448 if (!MaskC) 5449 return SDValue(); 5450 unsigned Mask = MaskC->getZExtValue(); 5451 if (Mask == 0xffff) 5452 return SDValue(); 5453 SDValue Res; 5454 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 5455 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 5456 if (N1C) { 5457 unsigned Val = N1C->getZExtValue(); 5458 if ((Val & ~Mask) != Val) 5459 return SDValue(); 5460 5461 if (ARM::isBitFieldInvertedMask(Mask)) { 5462 Val >>= CountTrailingZeros_32(~Mask); 5463 5464 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 5465 DAG.getConstant(Val, MVT::i32), 5466 DAG.getConstant(Mask, MVT::i32)); 5467 5468 // Do not add new nodes to DAG combiner worklist. 5469 DCI.CombineTo(N, Res, false); 5470 return SDValue(); 5471 } 5472 } else if (N1.getOpcode() == ISD::AND) { 5473 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 5474 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5475 if (!N11C) 5476 return SDValue(); 5477 unsigned Mask2 = N11C->getZExtValue(); 5478 5479 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 5480 // as is to match. 5481 if (ARM::isBitFieldInvertedMask(Mask) && 5482 (Mask == ~Mask2)) { 5483 // The pack halfword instruction works better for masks that fit it, 5484 // so use that when it's available. 5485 if (Subtarget->hasT2ExtractPack() && 5486 (Mask == 0xffff || Mask == 0xffff0000)) 5487 return SDValue(); 5488 // 2a 5489 unsigned amt = CountTrailingZeros_32(Mask2); 5490 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 5491 DAG.getConstant(amt, MVT::i32)); 5492 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 5493 DAG.getConstant(Mask, MVT::i32)); 5494 // Do not add new nodes to DAG combiner worklist. 5495 DCI.CombineTo(N, Res, false); 5496 return SDValue(); 5497 } else if (ARM::isBitFieldInvertedMask(~Mask) && 5498 (~Mask == Mask2)) { 5499 // The pack halfword instruction works better for masks that fit it, 5500 // so use that when it's available. 
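 // (The Thumb2 pack-halfword instructions, PKHBT and PKHTB, merge two
 // halfwords in a single instruction, subsuming this BFI pattern.)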
5501 if (Subtarget->hasT2ExtractPack() && 5502 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 5503 return SDValue(); 5504 // 2b 5505 unsigned lsb = CountTrailingZeros_32(Mask); 5506 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 5507 DAG.getConstant(lsb, MVT::i32)); 5508 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 5509 DAG.getConstant(Mask2, MVT::i32)); 5510 // Do not add new nodes to DAG combiner worklist. 5511 DCI.CombineTo(N, Res, false); 5512 return SDValue(); 5513 } 5514 } 5515 5516 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 5517 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 5518 ARM::isBitFieldInvertedMask(~Mask)) { 5519 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 5520 // where lsb(mask) == #shamt and masked bits of B are known zero. 5521 SDValue ShAmt = N00.getOperand(1); 5522 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 5523 unsigned LSB = CountTrailingZeros_32(Mask); 5524 if (ShAmtC != LSB) 5525 return SDValue(); 5526 5527 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 5528 DAG.getConstant(~Mask, MVT::i32)); 5529 5530 // Do not add new nodes to DAG combiner worklist. 5531 DCI.CombineTo(N, Res, false); 5532 } 5533 5534 return SDValue(); 5535} 5536 5537/// PerformBFICombine - (bfi A, (and B, C1), C2) -> (bfi A, B, C2) iff 5538/// C1 & C2 == C1. 5539static SDValue PerformBFICombine(SDNode *N, 5540 TargetLowering::DAGCombinerInfo &DCI) { 5541 SDValue N1 = N->getOperand(1); 5542 if (N1.getOpcode() == ISD::AND) { 5543 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 5544 if (!N11C) 5545 return SDValue(); 5546 unsigned Mask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 5547 unsigned Mask2 = N11C->getZExtValue(); 5548 if ((Mask & Mask2) == Mask2) 5549 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 5550 N->getOperand(0), N1.getOperand(0), 5551 N->getOperand(2)); 5552 } 5553 return SDValue(); 5554} 5555 5556/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 5557/// ARMISD::VMOVRRD. 5558static SDValue PerformVMOVRRDCombine(SDNode *N, 5559 TargetLowering::DAGCombinerInfo &DCI) { 5560 // vmovrrd(vmovdrr x, y) -> x,y 5561 SDValue InDouble = N->getOperand(0); 5562 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 5563 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 5564 5565 // vmovrrd(load f64) -> (load i32), (load i32) 5566 SDNode *InNode = InDouble.getNode(); 5567 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 5568 InNode->getValueType(0) == MVT::f64 && 5569 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 5570 !cast<LoadSDNode>(InNode)->isVolatile()) { 5571 // TODO: Should this be done for non-FrameIndex operands? 
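 // Splitting the f64 load into two i32 loads lets each half be loaded
 // straight into a GPR, instead of loading a D register and transferring
 // it out with a VMOVRRD.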
5572 LoadSDNode *LD = cast<LoadSDNode>(InNode); 5573 5574 SelectionDAG &DAG = DCI.DAG; 5575 DebugLoc DL = LD->getDebugLoc(); 5576 SDValue BasePtr = LD->getBasePtr(); 5577 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 5578 LD->getPointerInfo(), LD->isVolatile(), 5579 LD->isNonTemporal(), LD->getAlignment()); 5580 5581 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 5582 DAG.getConstant(4, MVT::i32)); 5583 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 5584 LD->getPointerInfo(), LD->isVolatile(), 5585 LD->isNonTemporal(), 5586 std::min(4U, LD->getAlignment() / 2)); 5587 5588 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 5589 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 5590 DCI.RemoveFromWorklist(LD); 5591 DAG.DeleteNode(LD); 5592 return Result; 5593 } 5594 5595 return SDValue(); 5596} 5597 5598/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 5599/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 5600static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 5601 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 5602 SDValue Op0 = N->getOperand(0); 5603 SDValue Op1 = N->getOperand(1); 5604 if (Op0.getOpcode() == ISD::BITCAST) 5605 Op0 = Op0.getOperand(0); 5606 if (Op1.getOpcode() == ISD::BITCAST) 5607 Op1 = Op1.getOperand(0); 5608 if (Op0.getOpcode() == ARMISD::VMOVRRD && 5609 Op0.getNode() == Op1.getNode() && 5610 Op0.getResNo() == 0 && Op1.getResNo() == 1) 5611 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 5612 N->getValueType(0), Op0.getOperand(0)); 5613 return SDValue(); 5614} 5615 5616/// PerformSTORECombine - Target-specific dag combine xforms for 5617/// ISD::STORE. 5618static SDValue PerformSTORECombine(SDNode *N, 5619 TargetLowering::DAGCombinerInfo &DCI) { 5620 // Bitcast an i64 store extracted from a vector to f64. 5621 // Otherwise, the i64 value will be legalized to a pair of i32 values. 5622 StoreSDNode *St = cast<StoreSDNode>(N); 5623 SDValue StVal = St->getValue(); 5624 if (!ISD::isNormalStore(St) || St->isVolatile() || 5625 StVal.getValueType() != MVT::i64 || 5626 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 5627 return SDValue(); 5628 5629 SelectionDAG &DAG = DCI.DAG; 5630 DebugLoc dl = StVal.getDebugLoc(); 5631 SDValue IntVec = StVal.getOperand(0); 5632 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5633 IntVec.getValueType().getVectorNumElements()); 5634 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 5635 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 5636 Vec, StVal.getOperand(1)); 5637 dl = N->getDebugLoc(); 5638 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 5639 // Make the DAGCombiner fold the bitcasts. 5640 DCI.AddToWorklist(Vec.getNode()); 5641 DCI.AddToWorklist(ExtElt.getNode()); 5642 DCI.AddToWorklist(V.getNode()); 5643 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 5644 St->getPointerInfo(), St->isVolatile(), 5645 St->isNonTemporal(), St->getAlignment(), 5646 St->getTBAAInfo()); 5647} 5648 5649/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 5650/// are normal, non-volatile loads. If so, it is profitable to bitcast an 5651/// i64 vector to have f64 elements, since the value can then be loaded 5652/// directly into a VFP register. 
5653static bool hasNormalLoadOperand(SDNode *N) { 5654 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 5655 for (unsigned i = 0; i < NumElts; ++i) { 5656 SDNode *Elt = N->getOperand(i).getNode(); 5657 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 5658 return true; 5659 } 5660 return false; 5661} 5662 5663/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 5664/// ISD::BUILD_VECTOR. 5665static SDValue PerformBUILD_VECTORCombine(SDNode *N, 5666 TargetLowering::DAGCombinerInfo &DCI){ 5667 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 5668 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 5669 // into a pair of GPRs, which is fine when the value is used as a scalar, 5670 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 5671 SelectionDAG &DAG = DCI.DAG; 5672 if (N->getNumOperands() == 2) { 5673 SDValue RV = PerformVMOVDRRCombine(N, DAG); 5674 if (RV.getNode()) 5675 return RV; 5676 } 5677 5678 // Load i64 elements as f64 values so that type legalization does not split 5679 // them up into i32 values. 5680 EVT VT = N->getValueType(0); 5681 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 5682 return SDValue(); 5683 DebugLoc dl = N->getDebugLoc(); 5684 SmallVector<SDValue, 8> Ops; 5685 unsigned NumElts = VT.getVectorNumElements(); 5686 for (unsigned i = 0; i < NumElts; ++i) { 5687 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 5688 Ops.push_back(V); 5689 // Make the DAGCombiner fold the bitcast. 5690 DCI.AddToWorklist(V.getNode()); 5691 } 5692 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 5693 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 5694 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 5695} 5696 5697/// PerformInsertEltCombine - Target-specific dag combine xforms for 5698/// ISD::INSERT_VECTOR_ELT. 5699static SDValue PerformInsertEltCombine(SDNode *N, 5700 TargetLowering::DAGCombinerInfo &DCI) { 5701 // Bitcast an i64 load inserted into a vector to f64. 5702 // Otherwise, the i64 value will be legalized to a pair of i32 values. 5703 EVT VT = N->getValueType(0); 5704 SDNode *Elt = N->getOperand(1).getNode(); 5705 if (VT.getVectorElementType() != MVT::i64 || 5706 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 5707 return SDValue(); 5708 5709 SelectionDAG &DAG = DCI.DAG; 5710 DebugLoc dl = N->getDebugLoc(); 5711 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 5712 VT.getVectorNumElements()); 5713 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 5714 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 5715 // Make the DAGCombiner fold the bitcasts. 5716 DCI.AddToWorklist(Vec.getNode()); 5717 DCI.AddToWorklist(V.getNode()); 5718 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 5719 Vec, V, N->getOperand(2)); 5720 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 5721} 5722 5723/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 5724/// ISD::VECTOR_SHUFFLE. 5725static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 5726 // The LLVM shufflevector instruction does not require the shuffle mask 5727 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 5728 // have that requirement. 
When translating to ISD::VECTOR_SHUFFLE, if the 5729 // operands do not match the mask length, they are extended by concatenating 5730 // them with undef vectors. That is probably the right thing for other 5731 // targets, but for NEON it is better to concatenate two double-register 5732 // size vector operands into a single quad-register size vector. Do that 5733 // transformation here: 5734 // shuffle(concat(v1, undef), concat(v2, undef)) -> 5735 // shuffle(concat(v1, v2), undef) 5736 SDValue Op0 = N->getOperand(0); 5737 SDValue Op1 = N->getOperand(1); 5738 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 5739 Op1.getOpcode() != ISD::CONCAT_VECTORS || 5740 Op0.getNumOperands() != 2 || 5741 Op1.getNumOperands() != 2) 5742 return SDValue(); 5743 SDValue Concat0Op1 = Op0.getOperand(1); 5744 SDValue Concat1Op1 = Op1.getOperand(1); 5745 if (Concat0Op1.getOpcode() != ISD::UNDEF || 5746 Concat1Op1.getOpcode() != ISD::UNDEF) 5747 return SDValue(); 5748 // Skip the transformation if any of the types are illegal. 5749 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5750 EVT VT = N->getValueType(0); 5751 if (!TLI.isTypeLegal(VT) || 5752 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 5753 !TLI.isTypeLegal(Concat1Op1.getValueType())) 5754 return SDValue(); 5755 5756 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 5757 Op0.getOperand(0), Op1.getOperand(0)); 5758 // Translate the shuffle mask. 5759 SmallVector<int, 16> NewMask; 5760 unsigned NumElts = VT.getVectorNumElements(); 5761 unsigned HalfElts = NumElts/2; 5762 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 5763 for (unsigned n = 0; n < NumElts; ++n) { 5764 int MaskElt = SVN->getMaskElt(n); 5765 int NewElt = -1; 5766 if (MaskElt < (int)HalfElts) 5767 NewElt = MaskElt; 5768 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 5769 NewElt = HalfElts + MaskElt - NumElts; 5770 NewMask.push_back(NewElt); 5771 } 5772 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 5773 DAG.getUNDEF(VT), NewMask.data()); 5774} 5775 5776/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 5777/// NEON load/store intrinsics to merge base address updates. 5778static SDValue CombineBaseUpdate(SDNode *N, 5779 TargetLowering::DAGCombinerInfo &DCI) { 5780 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 5781 return SDValue(); 5782 5783 SelectionDAG &DAG = DCI.DAG; 5784 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 5785 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 5786 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 5787 SDValue Addr = N->getOperand(AddrOpIdx); 5788 5789 // Search for a use of the address operand that is an increment. 5790 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 5791 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 5792 SDNode *User = *UI; 5793 if (User->getOpcode() != ISD::ADD || 5794 UI.getUse().getResNo() != Addr.getResNo()) 5795 continue; 5796 5797 // Check that the add is independent of the load/store. Otherwise, folding 5798 // it would create a cycle. 5799 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 5800 continue; 5801 5802 // Find the new opcode for the updating load/store. 
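 // For example, a vld1 whose address is then advanced by an ADD of the
 // access size can become VLD1_UPD, the post-indexed form that writes the
 // incremented address back to the base register.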
5803 bool isLoad = true; 5804 bool isLaneOp = false; 5805 unsigned NewOpc = 0; 5806 unsigned NumVecs = 0; 5807 if (isIntrinsic) { 5808 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 5809 switch (IntNo) { 5810 default: assert(0 && "unexpected intrinsic for Neon base update"); 5811 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 5812 NumVecs = 1; break; 5813 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 5814 NumVecs = 2; break; 5815 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 5816 NumVecs = 3; break; 5817 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 5818 NumVecs = 4; break; 5819 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 5820 NumVecs = 2; isLaneOp = true; break; 5821 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 5822 NumVecs = 3; isLaneOp = true; break; 5823 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 5824 NumVecs = 4; isLaneOp = true; break; 5825 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 5826 NumVecs = 1; isLoad = false; break; 5827 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 5828 NumVecs = 2; isLoad = false; break; 5829 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 5830 NumVecs = 3; isLoad = false; break; 5831 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 5832 NumVecs = 4; isLoad = false; break; 5833 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 5834 NumVecs = 2; isLoad = false; isLaneOp = true; break; 5835 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 5836 NumVecs = 3; isLoad = false; isLaneOp = true; break; 5837 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 5838 NumVecs = 4; isLoad = false; isLaneOp = true; break; 5839 } 5840 } else { 5841 isLaneOp = true; 5842 switch (N->getOpcode()) { 5843 default: assert(0 && "unexpected opcode for Neon base update"); 5844 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 5845 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 5846 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 5847 } 5848 } 5849 5850 // Find the size of memory referenced by the load/store. 5851 EVT VecTy; 5852 if (isLoad) 5853 VecTy = N->getValueType(0); 5854 else 5855 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 5856 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 5857 if (isLaneOp) 5858 NumBytes /= VecTy.getVectorNumElements(); 5859 5860 // If the increment is a constant, it must match the memory ref size. 5861 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 5862 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 5863 uint64_t IncVal = CInc->getZExtValue(); 5864 if (IncVal != NumBytes) 5865 continue; 5866 } else if (NumBytes >= 3 * 16) { 5867 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 5868 // separate instructions that make it harder to use a non-constant update. 5869 continue; 5870 } 5871 5872 // Create the new updating load/store node. 5873 EVT Tys[6]; 5874 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 5875 unsigned n; 5876 for (n = 0; n < NumResultVecs; ++n) 5877 Tys[n] = VecTy; 5878 Tys[n++] = MVT::i32; 5879 Tys[n] = MVT::Other; 5880 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 5881 SmallVector<SDValue, 8> Ops; 5882 Ops.push_back(N->getOperand(0)); // incoming chain 5883 Ops.push_back(N->getOperand(AddrOpIdx)); 5884 Ops.push_back(Inc); 5885 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 5886 Ops.push_back(N->getOperand(i)); 5887 } 5888 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 5889 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 5890 Ops.data(), Ops.size(), 5891 MemInt->getMemoryVT(), 5892 MemInt->getMemOperand()); 5893 5894 // Update the uses. 5895 std::vector<SDValue> NewResults; 5896 for (unsigned i = 0; i < NumResultVecs; ++i) { 5897 NewResults.push_back(SDValue(UpdN.getNode(), i)); 5898 } 5899 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 5900 DCI.CombineTo(N, NewResults); 5901 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 5902 5903 break; 5904 } 5905 return SDValue(); 5906} 5907 5908/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 5909/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 5910/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 5911/// return true. 5912static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 5913 SelectionDAG &DAG = DCI.DAG; 5914 EVT VT = N->getValueType(0); 5915 // vldN-dup instructions only support 64-bit vectors for N > 1. 5916 if (!VT.is64BitVector()) 5917 return false; 5918 5919 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 5920 SDNode *VLD = N->getOperand(0).getNode(); 5921 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 5922 return false; 5923 unsigned NumVecs = 0; 5924 unsigned NewOpc = 0; 5925 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 5926 if (IntNo == Intrinsic::arm_neon_vld2lane) { 5927 NumVecs = 2; 5928 NewOpc = ARMISD::VLD2DUP; 5929 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 5930 NumVecs = 3; 5931 NewOpc = ARMISD::VLD3DUP; 5932 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 5933 NumVecs = 4; 5934 NewOpc = ARMISD::VLD4DUP; 5935 } else { 5936 return false; 5937 } 5938 5939 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 5940 // numbers match the load. 5941 unsigned VLDLaneNo = 5942 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 5943 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5944 UI != UE; ++UI) { 5945 // Ignore uses of the chain result. 5946 if (UI.getUse().getResNo() == NumVecs) 5947 continue; 5948 SDNode *User = *UI; 5949 if (User->getOpcode() != ARMISD::VDUPLANE || 5950 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 5951 return false; 5952 } 5953 5954 // Create the vldN-dup node. 5955 EVT Tys[5]; 5956 unsigned n; 5957 for (n = 0; n < NumVecs; ++n) 5958 Tys[n] = VT; 5959 Tys[n] = MVT::Other; 5960 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 5961 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 5962 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 5963 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 5964 Ops, 2, VLDMemInt->getMemoryVT(), 5965 VLDMemInt->getMemOperand()); 5966 5967 // Update the uses. 
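 // Rewrite each VDUPLANE user to take the matching result of the new
 // vldN-dup node directly.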
5968 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 5969 UI != UE; ++UI) { 5970 unsigned ResNo = UI.getUse().getResNo(); 5971 // Ignore uses of the chain result. 5972 if (ResNo == NumVecs) 5973 continue; 5974 SDNode *User = *UI; 5975 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 5976 } 5977 5978 // Now the vldN-lane intrinsic is dead except for its chain result. 5979 // Update uses of the chain. 5980 std::vector<SDValue> VLDDupResults; 5981 for (unsigned n = 0; n < NumVecs; ++n) 5982 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 5983 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 5984 DCI.CombineTo(VLD, VLDDupResults); 5985 5986 return true; 5987} 5988 5989/// PerformVDUPLANECombine - Target-specific dag combine xforms for 5990/// ARMISD::VDUPLANE. 5991static SDValue PerformVDUPLANECombine(SDNode *N, 5992 TargetLowering::DAGCombinerInfo &DCI) { 5993 SDValue Op = N->getOperand(0); 5994 5995 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 5996 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 5997 if (CombineVLDDUP(N, DCI)) 5998 return SDValue(N, 0); 5999 6000 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 6001 // redundant. Ignore bit_converts for now; element sizes are checked below. 6002 while (Op.getOpcode() == ISD::BITCAST) 6003 Op = Op.getOperand(0); 6004 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 6005 return SDValue(); 6006 6007 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 6008 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 6009 // The canonical VMOV for a zero vector uses a 32-bit element size. 6010 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6011 unsigned EltBits; 6012 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 6013 EltSize = 8; 6014 EVT VT = N->getValueType(0); 6015 if (EltSize > VT.getVectorElementType().getSizeInBits()) 6016 return SDValue(); 6017 6018 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 6019} 6020 6021/// getVShiftImm - Check if this is a valid build_vector for the immediate 6022/// operand of a vector shift operation, where all the elements of the 6023/// build_vector must have the same constant integer value. 6024static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 6025 // Ignore bit_converts. 6026 while (Op.getOpcode() == ISD::BITCAST) 6027 Op = Op.getOperand(0); 6028 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6029 APInt SplatBits, SplatUndef; 6030 unsigned SplatBitSize; 6031 bool HasAnyUndefs; 6032 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 6033 HasAnyUndefs, ElementBits) || 6034 SplatBitSize > ElementBits) 6035 return false; 6036 Cnt = SplatBits.getSExtValue(); 6037 return true; 6038} 6039 6040/// isVShiftLImm - Check if this is a valid build_vector for the immediate 6041/// operand of a vector shift left operation. That value must be in the range: 6042/// 0 <= Value < ElementBits for a left shift; or 6043/// 0 <= Value <= ElementBits for a long left shift. 6044static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 6045 assert(VT.isVector() && "vector shift count is not a vector type"); 6046 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6047 if (! getVShiftImm(Op, ElementBits, Cnt)) 6048 return false; 6049 return (Cnt >= 0 && (isLong ? 
Cnt-1 : Cnt) < ElementBits); 6050} 6051 6052/// isVShiftRImm - Check if this is a valid build_vector for the immediate 6053/// operand of a vector shift right operation. For a shift opcode, the value 6054/// is positive, but for an intrinsic the shift count must be negative. The 6055/// absolute value must be in the range: 6056/// 1 <= |Value| <= ElementBits for a right shift; or 6057/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 6058static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 6059 int64_t &Cnt) { 6060 assert(VT.isVector() && "vector shift count is not a vector type"); 6061 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 6062 if (! getVShiftImm(Op, ElementBits, Cnt)) 6063 return false; 6064 if (isIntrinsic) 6065 Cnt = -Cnt; 6066 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 6067} 6068 6069/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 6070static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 6071 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 6072 switch (IntNo) { 6073 default: 6074 // Don't do anything for most intrinsics. 6075 break; 6076 6077 // Vector shifts: check for immediate versions and lower them. 6078 // Note: This is done during DAG combining instead of DAG legalizing because 6079 // the build_vectors for 64-bit vector element shift counts are generally 6080 // not legal, and it is hard to see their values after they get legalized to 6081 // loads from a constant pool. 6082 case Intrinsic::arm_neon_vshifts: 6083 case Intrinsic::arm_neon_vshiftu: 6084 case Intrinsic::arm_neon_vshiftls: 6085 case Intrinsic::arm_neon_vshiftlu: 6086 case Intrinsic::arm_neon_vshiftn: 6087 case Intrinsic::arm_neon_vrshifts: 6088 case Intrinsic::arm_neon_vrshiftu: 6089 case Intrinsic::arm_neon_vrshiftn: 6090 case Intrinsic::arm_neon_vqshifts: 6091 case Intrinsic::arm_neon_vqshiftu: 6092 case Intrinsic::arm_neon_vqshiftsu: 6093 case Intrinsic::arm_neon_vqshiftns: 6094 case Intrinsic::arm_neon_vqshiftnu: 6095 case Intrinsic::arm_neon_vqshiftnsu: 6096 case Intrinsic::arm_neon_vqrshiftns: 6097 case Intrinsic::arm_neon_vqrshiftnu: 6098 case Intrinsic::arm_neon_vqrshiftnsu: { 6099 EVT VT = N->getOperand(1).getValueType(); 6100 int64_t Cnt; 6101 unsigned VShiftOpc = 0; 6102 6103 switch (IntNo) { 6104 case Intrinsic::arm_neon_vshifts: 6105 case Intrinsic::arm_neon_vshiftu: 6106 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 6107 VShiftOpc = ARMISD::VSHL; 6108 break; 6109 } 6110 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 6111 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
6112 ARMISD::VSHRs : ARMISD::VSHRu); 6113 break; 6114 } 6115 return SDValue(); 6116 6117 case Intrinsic::arm_neon_vshiftls: 6118 case Intrinsic::arm_neon_vshiftlu: 6119 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 6120 break; 6121 llvm_unreachable("invalid shift count for vshll intrinsic"); 6122 6123 case Intrinsic::arm_neon_vrshifts: 6124 case Intrinsic::arm_neon_vrshiftu: 6125 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 6126 break; 6127 return SDValue(); 6128 6129 case Intrinsic::arm_neon_vqshifts: 6130 case Intrinsic::arm_neon_vqshiftu: 6131 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6132 break; 6133 return SDValue(); 6134 6135 case Intrinsic::arm_neon_vqshiftsu: 6136 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 6137 break; 6138 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 6139 6140 case Intrinsic::arm_neon_vshiftn: 6141 case Intrinsic::arm_neon_vrshiftn: 6142 case Intrinsic::arm_neon_vqshiftns: 6143 case Intrinsic::arm_neon_vqshiftnu: 6144 case Intrinsic::arm_neon_vqshiftnsu: 6145 case Intrinsic::arm_neon_vqrshiftns: 6146 case Intrinsic::arm_neon_vqrshiftnu: 6147 case Intrinsic::arm_neon_vqrshiftnsu: 6148 // Narrowing shifts require an immediate right shift. 6149 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 6150 break; 6151 llvm_unreachable("invalid shift count for narrowing vector shift " 6152 "intrinsic"); 6153 6154 default: 6155 llvm_unreachable("unhandled vector shift"); 6156 } 6157 6158 switch (IntNo) { 6159 case Intrinsic::arm_neon_vshifts: 6160 case Intrinsic::arm_neon_vshiftu: 6161 // Opcode already set above. 6162 break; 6163 case Intrinsic::arm_neon_vshiftls: 6164 case Intrinsic::arm_neon_vshiftlu: 6165 if (Cnt == VT.getVectorElementType().getSizeInBits()) 6166 VShiftOpc = ARMISD::VSHLLi; 6167 else 6168 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
6169 ARMISD::VSHLLs : ARMISD::VSHLLu); 6170 break; 6171 case Intrinsic::arm_neon_vshiftn: 6172 VShiftOpc = ARMISD::VSHRN; break; 6173 case Intrinsic::arm_neon_vrshifts: 6174 VShiftOpc = ARMISD::VRSHRs; break; 6175 case Intrinsic::arm_neon_vrshiftu: 6176 VShiftOpc = ARMISD::VRSHRu; break; 6177 case Intrinsic::arm_neon_vrshiftn: 6178 VShiftOpc = ARMISD::VRSHRN; break; 6179 case Intrinsic::arm_neon_vqshifts: 6180 VShiftOpc = ARMISD::VQSHLs; break; 6181 case Intrinsic::arm_neon_vqshiftu: 6182 VShiftOpc = ARMISD::VQSHLu; break; 6183 case Intrinsic::arm_neon_vqshiftsu: 6184 VShiftOpc = ARMISD::VQSHLsu; break; 6185 case Intrinsic::arm_neon_vqshiftns: 6186 VShiftOpc = ARMISD::VQSHRNs; break; 6187 case Intrinsic::arm_neon_vqshiftnu: 6188 VShiftOpc = ARMISD::VQSHRNu; break; 6189 case Intrinsic::arm_neon_vqshiftnsu: 6190 VShiftOpc = ARMISD::VQSHRNsu; break; 6191 case Intrinsic::arm_neon_vqrshiftns: 6192 VShiftOpc = ARMISD::VQRSHRNs; break; 6193 case Intrinsic::arm_neon_vqrshiftnu: 6194 VShiftOpc = ARMISD::VQRSHRNu; break; 6195 case Intrinsic::arm_neon_vqrshiftnsu: 6196 VShiftOpc = ARMISD::VQRSHRNsu; break; 6197 } 6198 6199 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6200 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 6201 } 6202 6203 case Intrinsic::arm_neon_vshiftins: { 6204 EVT VT = N->getOperand(1).getValueType(); 6205 int64_t Cnt; 6206 unsigned VShiftOpc = 0; 6207 6208 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 6209 VShiftOpc = ARMISD::VSLI; 6210 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 6211 VShiftOpc = ARMISD::VSRI; 6212 else { 6213 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 6214 } 6215 6216 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 6217 N->getOperand(1), N->getOperand(2), 6218 DAG.getConstant(Cnt, MVT::i32)); 6219 } 6220 6221 case Intrinsic::arm_neon_vqrshifts: 6222 case Intrinsic::arm_neon_vqrshiftu: 6223 // No immediate versions of these to check for. 6224 break; 6225 } 6226 6227 return SDValue(); 6228} 6229 6230/// PerformShiftCombine - Checks for immediate versions of vector shifts and 6231/// lowers them. As with the vector shift intrinsics, this is done during DAG 6232/// combining instead of DAG legalizing because the build_vectors for 64-bit 6233/// vector element shift counts are generally not legal, and it is hard to see 6234/// their values after they get legalized to loads from a constant pool. 6235static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 6236 const ARMSubtarget *ST) { 6237 EVT VT = N->getValueType(0); 6238 6239 // Nothing to be done for scalar shifts. 6240 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6241 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 6242 return SDValue(); 6243 6244 assert(ST->hasNEON() && "unexpected vector shift"); 6245 int64_t Cnt; 6246 6247 switch (N->getOpcode()) { 6248 default: llvm_unreachable("unexpected shift opcode"); 6249 6250 case ISD::SHL: 6251 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 6252 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 6253 DAG.getConstant(Cnt, MVT::i32)); 6254 break; 6255 6256 case ISD::SRA: 6257 case ISD::SRL: 6258 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 6259 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 
6260 ARMISD::VSHRs : ARMISD::VSHRu); 6261 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 6262 DAG.getConstant(Cnt, MVT::i32)); 6263 } 6264 } 6265 return SDValue(); 6266} 6267 6268/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 6269/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 6270static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 6271 const ARMSubtarget *ST) { 6272 SDValue N0 = N->getOperand(0); 6273 6274 // Check for sign- and zero-extensions of vector extract operations of 8- 6275 // and 16-bit vector elements. NEON supports these directly. They are 6276 // handled during DAG combining because type legalization will promote them 6277 // to 32-bit types and it is messy to recognize the operations after that. 6278 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 6279 SDValue Vec = N0.getOperand(0); 6280 SDValue Lane = N0.getOperand(1); 6281 EVT VT = N->getValueType(0); 6282 EVT EltVT = N0.getValueType(); 6283 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6284 6285 if (VT == MVT::i32 && 6286 (EltVT == MVT::i8 || EltVT == MVT::i16) && 6287 TLI.isTypeLegal(Vec.getValueType()) && 6288 isa<ConstantSDNode>(Lane)) { 6289 6290 unsigned Opc = 0; 6291 switch (N->getOpcode()) { 6292 default: llvm_unreachable("unexpected opcode"); 6293 case ISD::SIGN_EXTEND: 6294 Opc = ARMISD::VGETLANEs; 6295 break; 6296 case ISD::ZERO_EXTEND: 6297 case ISD::ANY_EXTEND: 6298 Opc = ARMISD::VGETLANEu; 6299 break; 6300 } 6301 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 6302 } 6303 } 6304 6305 return SDValue(); 6306} 6307 6308/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 6309/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 6310static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 6311 const ARMSubtarget *ST) { 6312 // If the target supports NEON, try to use vmax/vmin instructions for f32 6313 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 6314 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 6315 // a NaN; only do the transformation when it matches that behavior. 6316 6317 // For now only do this when using NEON for FP operations; if using VFP, it 6318 // is not obvious that the benefit outweighs the cost of switching to the 6319 // NEON pipeline. 6320 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 6321 N->getValueType(0) != MVT::f32) 6322 return SDValue(); 6323 6324 SDValue CondLHS = N->getOperand(0); 6325 SDValue CondRHS = N->getOperand(1); 6326 SDValue LHS = N->getOperand(2); 6327 SDValue RHS = N->getOperand(3); 6328 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 6329 6330 unsigned Opcode = 0; 6331 bool IsReversed; 6332 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 6333 IsReversed = false; // x CC y ? x : y 6334 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 6335 IsReversed = true ; // x CC y ? y : x 6336 } else { 6337 return SDValue(); 6338 } 6339 6340 bool IsUnordered; 6341 switch (CC) { 6342 default: break; 6343 case ISD::SETOLT: 6344 case ISD::SETOLE: 6345 case ISD::SETLT: 6346 case ISD::SETLE: 6347 case ISD::SETULT: 6348 case ISD::SETULE: 6349 // If LHS is NaN, an ordered comparison will be false and the result will 6350 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 6351 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 
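// Concretely: if LHS is NaN, "LHS < RHS ? LHS : RHS" evaluates the ordered
// comparison to false and selects RHS, whereas VMIN.F32 would produce NaN,
// so the fold is only safe when the operand checked here is known not to
// be a NaN.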
6352 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 6353 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6354 break; 6355 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 6356 // will return -0, so vmin can only be used for unsafe math or if one of 6357 // the operands is known to be nonzero. 6358 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 6359 !UnsafeFPMath && 6360 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6361 break; 6362 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 6363 break; 6364 6365 case ISD::SETOGT: 6366 case ISD::SETOGE: 6367 case ISD::SETGT: 6368 case ISD::SETGE: 6369 case ISD::SETUGT: 6370 case ISD::SETUGE: 6371 // If LHS is NaN, an ordered comparison will be false and the result will 6372 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 6373 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 6374 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 6375 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 6376 break; 6377 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 6378 // will return +0, so vmax can only be used for unsafe math or if one of 6379 // the operands is known to be nonzero. 6380 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 6381 !UnsafeFPMath && 6382 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 6383 break; 6384 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 6385 break; 6386 } 6387 6388 if (!Opcode) 6389 return SDValue(); 6390 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 6391} 6392 6393SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 6394 DAGCombinerInfo &DCI) const { 6395 switch (N->getOpcode()) { 6396 default: break; 6397 case ISD::ADD: return PerformADDCombine(N, DCI); 6398 case ISD::SUB: return PerformSUBCombine(N, DCI); 6399 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 6400 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 6401 case ISD::AND: return PerformANDCombine(N, DCI); 6402 case ARMISD::BFI: return PerformBFICombine(N, DCI); 6403 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 6404 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 6405 case ISD::STORE: return PerformSTORECombine(N, DCI); 6406 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 6407 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 6408 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 6409 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 6410 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 6411 case ISD::SHL: 6412 case ISD::SRA: 6413 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 6414 case ISD::SIGN_EXTEND: 6415 case ISD::ZERO_EXTEND: 6416 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 6417 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 6418 case ARMISD::VLD2DUP: 6419 case ARMISD::VLD3DUP: 6420 case ARMISD::VLD4DUP: 6421 return CombineBaseUpdate(N, DCI); 6422 case ISD::INTRINSIC_VOID: 6423 case ISD::INTRINSIC_W_CHAIN: 6424 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 6425 case Intrinsic::arm_neon_vld1: 6426 case Intrinsic::arm_neon_vld2: 6427 case Intrinsic::arm_neon_vld3: 6428 case Intrinsic::arm_neon_vld4: 6429 case Intrinsic::arm_neon_vld2lane: 6430 case Intrinsic::arm_neon_vld3lane: 6431 case 
Intrinsic::arm_neon_vld4lane: 6432 case Intrinsic::arm_neon_vst1: 6433 case Intrinsic::arm_neon_vst2: 6434 case Intrinsic::arm_neon_vst3: 6435 case Intrinsic::arm_neon_vst4: 6436 case Intrinsic::arm_neon_vst2lane: 6437 case Intrinsic::arm_neon_vst3lane: 6438 case Intrinsic::arm_neon_vst4lane: 6439 return CombineBaseUpdate(N, DCI); 6440 default: break; 6441 } 6442 break; 6443 } 6444 return SDValue(); 6445} 6446 6447bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 6448 EVT VT) const { 6449 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 6450} 6451 6452bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { 6453 if (!Subtarget->allowsUnalignedMem()) 6454 return false; 6455 6456 switch (VT.getSimpleVT().SimpleTy) { 6457 default: 6458 return false; 6459 case MVT::i8: 6460 case MVT::i16: 6461 case MVT::i32: 6462 return true; 6463 // FIXME: VLD1 etc with standard alignment is legal. 6464 } 6465} 6466 6467static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 6468 if (V < 0) 6469 return false; 6470 6471 unsigned Scale = 1; 6472 switch (VT.getSimpleVT().SimpleTy) { 6473 default: return false; 6474 case MVT::i1: 6475 case MVT::i8: 6476 // Scale == 1; 6477 break; 6478 case MVT::i16: 6479 // Scale == 2; 6480 Scale = 2; 6481 break; 6482 case MVT::i32: 6483 // Scale == 4; 6484 Scale = 4; 6485 break; 6486 } 6487 6488 if ((V & (Scale - 1)) != 0) 6489 return false; 6490 V /= Scale; 6491 return V == (V & ((1LL << 5) - 1)); 6492} 6493 6494static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 6495 const ARMSubtarget *Subtarget) { 6496 bool isNeg = false; 6497 if (V < 0) { 6498 isNeg = true; 6499 V = - V; 6500 } 6501 6502 switch (VT.getSimpleVT().SimpleTy) { 6503 default: return false; 6504 case MVT::i1: 6505 case MVT::i8: 6506 case MVT::i16: 6507 case MVT::i32: 6508 // + imm12 or - imm8 6509 if (isNeg) 6510 return V == (V & ((1LL << 8) - 1)); 6511 return V == (V & ((1LL << 12) - 1)); 6512 case MVT::f32: 6513 case MVT::f64: 6514 // Same as ARM mode. FIXME: NEON? 6515 if (!Subtarget->hasVFP2()) 6516 return false; 6517 if ((V & 3) != 0) 6518 return false; 6519 V >>= 2; 6520 return V == (V & ((1LL << 8) - 1)); 6521 } 6522} 6523 6524/// isLegalAddressImmediate - Return true if the integer value can be used 6525/// as the offset of the target addressing mode for load / store of the 6526/// given type. 6527static bool isLegalAddressImmediate(int64_t V, EVT VT, 6528 const ARMSubtarget *Subtarget) { 6529 if (V == 0) 6530 return true; 6531 6532 if (!VT.isSimple()) 6533 return false; 6534 6535 if (Subtarget->isThumb1Only()) 6536 return isLegalT1AddressImmediate(V, VT); 6537 else if (Subtarget->isThumb2()) 6538 return isLegalT2AddressImmediate(V, VT, Subtarget); 6539 6540 // ARM mode. 6541 if (V < 0) 6542 V = - V; 6543 switch (VT.getSimpleVT().SimpleTy) { 6544 default: return false; 6545 case MVT::i1: 6546 case MVT::i8: 6547 case MVT::i32: 6548 // +- imm12 6549 return V == (V & ((1LL << 12) - 1)); 6550 case MVT::i16: 6551 // +- imm8 6552 return V == (V & ((1LL << 8) - 1)); 6553 case MVT::f32: 6554 case MVT::f64: 6555 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
6556 return false; 6557 if ((V & 3) != 0) 6558 return false; 6559 V >>= 2; 6560 return V == (V & ((1LL << 8) - 1)); 6561 } 6562} 6563 6564bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 6565 EVT VT) const { 6566 int Scale = AM.Scale; 6567 if (Scale < 0) 6568 return false; 6569 6570 switch (VT.getSimpleVT().SimpleTy) { 6571 default: return false; 6572 case MVT::i1: 6573 case MVT::i8: 6574 case MVT::i16: 6575 case MVT::i32: 6576 if (Scale == 1) 6577 return true; 6578 // r + r << imm 6579 Scale = Scale & ~1; 6580 return Scale == 2 || Scale == 4 || Scale == 8; 6581 case MVT::i64: 6582 // r + r 6583 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6584 return true; 6585 return false; 6586 case MVT::isVoid: 6587 // Note, we allow "void" uses (basically, uses that aren't loads or 6588 // stores), because arm allows folding a scale into many arithmetic 6589 // operations. This should be made more precise and revisited later. 6590 6591 // Allow r << imm, but the imm has to be a multiple of two. 6592 if (Scale & 1) return false; 6593 return isPowerOf2_32(Scale); 6594 } 6595} 6596 6597/// isLegalAddressingMode - Return true if the addressing mode represented 6598/// by AM is legal for this target, for a load/store of the specified type. 6599bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 6600 const Type *Ty) const { 6601 EVT VT = getValueType(Ty, true); 6602 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 6603 return false; 6604 6605 // Can never fold addr of global into load/store. 6606 if (AM.BaseGV) 6607 return false; 6608 6609 switch (AM.Scale) { 6610 case 0: // no scale reg, must be "r+i" or "r", or "i". 6611 break; 6612 case 1: 6613 if (Subtarget->isThumb1Only()) 6614 return false; 6615 // FALL THROUGH. 6616 default: 6617 // ARM doesn't support any R+R*scale+imm addr modes. 6618 if (AM.BaseOffs) 6619 return false; 6620 6621 if (!VT.isSimple()) 6622 return false; 6623 6624 if (Subtarget->isThumb2()) 6625 return isLegalT2ScaledAddressingMode(AM, VT); 6626 6627 int Scale = AM.Scale; 6628 switch (VT.getSimpleVT().SimpleTy) { 6629 default: return false; 6630 case MVT::i1: 6631 case MVT::i8: 6632 case MVT::i32: 6633 if (Scale < 0) Scale = -Scale; 6634 if (Scale == 1) 6635 return true; 6636 // r + r << imm 6637 return isPowerOf2_32(Scale & ~1); 6638 case MVT::i16: 6639 case MVT::i64: 6640 // r + r 6641 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 6642 return true; 6643 return false; 6644 6645 case MVT::isVoid: 6646 // Note, we allow "void" uses (basically, uses that aren't loads or 6647 // stores), because arm allows folding a scale into many arithmetic 6648 // operations. This should be made more precise and revisited later. 6649 6650 // Allow r << imm, but the imm has to be a multiple of two. 6651 if (Scale & 1) return false; 6652 return isPowerOf2_32(Scale); 6653 } 6654 break; 6655 } 6656 return true; 6657} 6658 6659/// isLegalICmpImmediate - Return true if the specified immediate is legal 6660/// icmp immediate, that is the target has icmp instructions which can compare 6661/// a register against the immediate without having to materialize the 6662/// immediate into a register. 
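/// For example (an illustrative case): in ARM mode "cmp r0, #255" is legal
/// because 255 fits the rotated 8-bit immediate encoding, while 257 fits no
/// even rotation of an 8-bit value and would need to be materialized into a
/// register first.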
6663bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 6664 if (!Subtarget->isThumb()) 6665 return ARM_AM::getSOImmVal(Imm) != -1; 6666 if (Subtarget->isThumb2()) 6667 return ARM_AM::getT2SOImmVal(Imm) != -1; 6668 return Imm >= 0 && Imm <= 255; 6669} 6670 6671static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 6672 bool isSEXTLoad, SDValue &Base, 6673 SDValue &Offset, bool &isInc, 6674 SelectionDAG &DAG) { 6675 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 6676 return false; 6677 6678 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 6679 // AddressingMode 3 6680 Base = Ptr->getOperand(0); 6681 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 6682 int RHSC = (int)RHS->getZExtValue(); 6683 if (RHSC < 0 && RHSC > -256) { 6684 assert(Ptr->getOpcode() == ISD::ADD); 6685 isInc = false; 6686 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 6687 return true; 6688 } 6689 } 6690 isInc = (Ptr->getOpcode() == ISD::ADD); 6691 Offset = Ptr->getOperand(1); 6692 return true; 6693 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 6694 // AddressingMode 2 6695 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 6696 int RHSC = (int)RHS->getZExtValue(); 6697 if (RHSC < 0 && RHSC > -0x1000) { 6698 assert(Ptr->getOpcode() == ISD::ADD); 6699 isInc = false; 6700 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 6701 Base = Ptr->getOperand(0); 6702 return true; 6703 } 6704 } 6705 6706 if (Ptr->getOpcode() == ISD::ADD) { 6707 isInc = true; 6708 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 6709 if (ShOpcVal != ARM_AM::no_shift) { 6710 Base = Ptr->getOperand(1); 6711 Offset = Ptr->getOperand(0); 6712 } else { 6713 Base = Ptr->getOperand(0); 6714 Offset = Ptr->getOperand(1); 6715 } 6716 return true; 6717 } 6718 6719 isInc = (Ptr->getOpcode() == ISD::ADD); 6720 Base = Ptr->getOperand(0); 6721 Offset = Ptr->getOperand(1); 6722 return true; 6723 } 6724 6725 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 6726 return false; 6727} 6728 6729static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 6730 bool isSEXTLoad, SDValue &Base, 6731 SDValue &Offset, bool &isInc, 6732 SelectionDAG &DAG) { 6733 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 6734 return false; 6735 6736 Base = Ptr->getOperand(0); 6737 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 6738 int RHSC = (int)RHS->getZExtValue(); 6739 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 6740 assert(Ptr->getOpcode() == ISD::ADD); 6741 isInc = false; 6742 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 6743 return true; 6744 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 6745 isInc = Ptr->getOpcode() == ISD::ADD; 6746 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 6747 return true; 6748 } 6749 } 6750 6751 return false; 6752} 6753 6754/// getPreIndexedAddressParts - returns true by value, base pointer and 6755/// offset pointer and addressing mode by reference if the node's address 6756/// can be legally represented as pre-indexed load / store address. 
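/// For example, "ldr r0, [r1, #4]!" loads from r1+4 and writes the updated
/// address back to r1, folding a separate ADD of the offset into the load.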
6757bool 6758ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 6759 SDValue &Offset, 6760 ISD::MemIndexedMode &AM, 6761 SelectionDAG &DAG) const { 6762 if (Subtarget->isThumb1Only()) 6763 return false; 6764 6765 EVT VT; 6766 SDValue Ptr; 6767 bool isSEXTLoad = false; 6768 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 6769 Ptr = LD->getBasePtr(); 6770 VT = LD->getMemoryVT(); 6771 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 6772 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 6773 Ptr = ST->getBasePtr(); 6774 VT = ST->getMemoryVT(); 6775 } else 6776 return false; 6777 6778 bool isInc; 6779 bool isLegal = false; 6780 if (Subtarget->isThumb2()) 6781 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 6782 Offset, isInc, DAG); 6783 else 6784 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 6785 Offset, isInc, DAG); 6786 if (!isLegal) 6787 return false; 6788 6789 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 6790 return true; 6791} 6792 6793/// getPostIndexedAddressParts - returns true by value, base pointer and 6794/// offset pointer and addressing mode by reference if this node can be 6795/// combined with a load / store to form a post-indexed load / store. 6796bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 6797 SDValue &Base, 6798 SDValue &Offset, 6799 ISD::MemIndexedMode &AM, 6800 SelectionDAG &DAG) const { 6801 if (Subtarget->isThumb1Only()) 6802 return false; 6803 6804 EVT VT; 6805 SDValue Ptr; 6806 bool isSEXTLoad = false; 6807 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 6808 VT = LD->getMemoryVT(); 6809 Ptr = LD->getBasePtr(); 6810 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 6811 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 6812 VT = ST->getMemoryVT(); 6813 Ptr = ST->getBasePtr(); 6814 } else 6815 return false; 6816 6817 bool isInc; 6818 bool isLegal = false; 6819 if (Subtarget->isThumb2()) 6820 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 6821 isInc, DAG); 6822 else 6823 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 6824 isInc, DAG); 6825 if (!isLegal) 6826 return false; 6827 6828 if (Ptr != Base) { 6829 // Swap base ptr and offset to catch more post-index load / store when 6830 // it's legal. In Thumb2 mode, offset must be an immediate. 6831 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 6832 !Subtarget->isThumb2()) 6833 std::swap(Base, Offset); 6834 6835 // Post-indexed load / store update the base pointer. 6836 if (Ptr != Base) 6837 return false; 6838 } 6839 6840 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 6841 return true; 6842} 6843 6844void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 6845 const APInt &Mask, 6846 APInt &KnownZero, 6847 APInt &KnownOne, 6848 const SelectionDAG &DAG, 6849 unsigned Depth) const { 6850 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 6851 switch (Op.getOpcode()) { 6852 default: break; 6853 case ARMISD::CMOV: { 6854 // Bits are known zero/one if known on the LHS and RHS. 
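// Illustrative example (constants chosen for exposition): if the two CMOV
// inputs are 0x00FF00F0 and 0x00FF000F, the top 24 bits agree (0x00FF00),
// so they are known regardless of which way the condition goes; the low
// byte differs and becomes unknown. Only the intersection of the two
// operands' known bits survives below.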
6855 DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 6856 if (KnownZero == 0 && KnownOne == 0) return; 6857 6858 APInt KnownZeroRHS, KnownOneRHS; 6859 DAG.ComputeMaskedBits(Op.getOperand(1), Mask, 6860 KnownZeroRHS, KnownOneRHS, Depth+1); 6861 KnownZero &= KnownZeroRHS; 6862 KnownOne &= KnownOneRHS; 6863 return; 6864 } 6865 } 6866} 6867 6868//===----------------------------------------------------------------------===// 6869// ARM Inline Assembly Support 6870//===----------------------------------------------------------------------===// 6871 6872bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 6873 // Looking for "rev" which is V6+. 6874 if (!Subtarget->hasV6Ops()) 6875 return false; 6876 6877 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 6878 std::string AsmStr = IA->getAsmString(); 6879 SmallVector<StringRef, 4> AsmPieces; 6880 SplitString(AsmStr, AsmPieces, ";\n"); 6881 6882 switch (AsmPieces.size()) { 6883 default: return false; 6884 case 1: 6885 AsmStr = AsmPieces[0]; 6886 AsmPieces.clear(); 6887 SplitString(AsmStr, AsmPieces, " \t,"); 6888 6889 // rev $0, $1 6890 if (AsmPieces.size() == 3 && 6891 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 6892 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 6893 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 6894 if (Ty && Ty->getBitWidth() == 32) 6895 return IntrinsicLowering::LowerToByteSwap(CI); 6896 } 6897 break; 6898 } 6899 6900 return false; 6901} 6902 6903/// getConstraintType - Given a constraint letter, return the type of 6904/// constraint it is for this target. 6905ARMTargetLowering::ConstraintType 6906ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 6907 if (Constraint.size() == 1) { 6908 switch (Constraint[0]) { 6909 default: break; 6910 case 'l': return C_RegisterClass; 6911 case 'w': return C_RegisterClass; 6912 } 6913 } 6914 return TargetLowering::getConstraintType(Constraint); 6915} 6916 6917/// Examine constraint type and operand type and determine a weight value. 6918/// This object must already have been set up with the operand type 6919/// and the current alternative constraint selected. 6920TargetLowering::ConstraintWeight 6921ARMTargetLowering::getSingleConstraintMatchWeight( 6922 AsmOperandInfo &info, const char *constraint) const { 6923 ConstraintWeight weight = CW_Invalid; 6924 Value *CallOperandVal = info.CallOperandVal; 6925 // If we don't have a value, we can't do a match, 6926 // but allow it at the lowest weight. 6927 if (CallOperandVal == NULL) 6928 return CW_Default; 6929 const Type *type = CallOperandVal->getType(); 6930 // Look at the constraint type. 
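// Illustrative example (hypothetical inline asm): for
// asm("mov %0, %1" : "=l"(x) : "l"(y)) on a Thumb target, 'l' names the
// small r0-r7 subclass and is scored CW_SpecificReg below, while the same
// letter in ARM mode gets the ordinary CW_Register weight.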
6931 switch (*constraint) { 6932 default: 6933 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 6934 break; 6935 case 'l': 6936 if (type->isIntegerTy()) { 6937 if (Subtarget->isThumb()) 6938 weight = CW_SpecificReg; 6939 else 6940 weight = CW_Register; 6941 } 6942 break; 6943 case 'w': 6944 if (type->isFloatingPointTy()) 6945 weight = CW_Register; 6946 break; 6947 } 6948 return weight; 6949} 6950 6951std::pair<unsigned, const TargetRegisterClass*> 6952ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 6953 EVT VT) const { 6954 if (Constraint.size() == 1) { 6955 // GCC ARM Constraint Letters 6956 switch (Constraint[0]) { 6957 case 'l': 6958 if (Subtarget->isThumb()) 6959 return std::make_pair(0U, ARM::tGPRRegisterClass); 6960 else 6961 return std::make_pair(0U, ARM::GPRRegisterClass); 6962 case 'r': 6963 return std::make_pair(0U, ARM::GPRRegisterClass); 6964 case 'w': 6965 if (VT == MVT::f32) 6966 return std::make_pair(0U, ARM::SPRRegisterClass); 6967 if (VT.getSizeInBits() == 64) 6968 return std::make_pair(0U, ARM::DPRRegisterClass); 6969 if (VT.getSizeInBits() == 128) 6970 return std::make_pair(0U, ARM::QPRRegisterClass); 6971 break; 6972 } 6973 } 6974 if (StringRef("{cc}").equals_lower(Constraint)) 6975 return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass); 6976 6977 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 6978} 6979 6980std::vector<unsigned> ARMTargetLowering:: 6981getRegClassForInlineAsmConstraint(const std::string &Constraint, 6982 EVT VT) const { 6983 if (Constraint.size() != 1) 6984 return std::vector<unsigned>(); 6985 6986 switch (Constraint[0]) { // GCC ARM Constraint Letters 6987 default: break; 6988 case 'l': 6989 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 6990 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 6991 0); 6992 case 'r': 6993 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 6994 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 6995 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 6996 ARM::R12, ARM::LR, 0); 6997 case 'w': 6998 if (VT == MVT::f32) 6999 return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3, 7000 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 7001 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 7002 ARM::S12,ARM::S13,ARM::S14,ARM::S15, 7003 ARM::S16,ARM::S17,ARM::S18,ARM::S19, 7004 ARM::S20,ARM::S21,ARM::S22,ARM::S23, 7005 ARM::S24,ARM::S25,ARM::S26,ARM::S27, 7006 ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); 7007 if (VT.getSizeInBits() == 64) 7008 return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, 7009 ARM::D4, ARM::D5, ARM::D6, ARM::D7, 7010 ARM::D8, ARM::D9, ARM::D10,ARM::D11, 7011 ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0); 7012 if (VT.getSizeInBits() == 128) 7013 return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, 7014 ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0); 7015 break; 7016 } 7017 7018 return std::vector<unsigned>(); 7019} 7020 7021/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 7022/// vector. If it is invalid, don't add anything to Ops. 
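/// For example (illustrative): an "I"(255) operand is lowered to a target
/// constant on ARM because 255 is a valid data-processing immediate, while
/// "I"(257) fails the range checks below and leaves Ops empty.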
7023void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 7024 char Constraint, 7025 std::vector<SDValue>&Ops, 7026 SelectionDAG &DAG) const { 7027 SDValue Result(0, 0); 7028 7029 switch (Constraint) { 7030 default: break; 7031 case 'I': case 'J': case 'K': case 'L': 7032 case 'M': case 'N': case 'O': 7033 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 7034 if (!C) 7035 return; 7036 7037 int64_t CVal64 = C->getSExtValue(); 7038 int CVal = (int) CVal64; 7039 // None of these constraints allow values larger than 32 bits. Check 7040 // that the value fits in an int. 7041 if (CVal != CVal64) 7042 return; 7043 7044 switch (Constraint) { 7045 case 'I': 7046 if (Subtarget->isThumb1Only()) { 7047 // This must be a constant between 0 and 255, for ADD 7048 // immediates. 7049 if (CVal >= 0 && CVal <= 255) 7050 break; 7051 } else if (Subtarget->isThumb2()) { 7052 // A constant that can be used as an immediate value in a 7053 // data-processing instruction. 7054 if (ARM_AM::getT2SOImmVal(CVal) != -1) 7055 break; 7056 } else { 7057 // A constant that can be used as an immediate value in a 7058 // data-processing instruction. 7059 if (ARM_AM::getSOImmVal(CVal) != -1) 7060 break; 7061 } 7062 return; 7063 7064 case 'J': 7065 if (Subtarget->isThumb()) { // FIXME thumb2 7066 // This must be a constant between -255 and -1, for negated ADD 7067 // immediates. This can be used in GCC with an "n" modifier that 7068 // prints the negated value, for use with SUB instructions. It is 7069 // not useful otherwise but is implemented for compatibility. 7070 if (CVal >= -255 && CVal <= -1) 7071 break; 7072 } else { 7073 // This must be a constant between -4095 and 4095. It is not clear 7074 // what this constraint is intended for. Implemented for 7075 // compatibility with GCC. 7076 if (CVal >= -4095 && CVal <= 4095) 7077 break; 7078 } 7079 return; 7080 7081 case 'K': 7082 if (Subtarget->isThumb1Only()) { 7083 // A 32-bit value where only one byte has a nonzero value. Exclude 7084 // zero to match GCC. This constraint is used by GCC internally for 7085 // constants that can be loaded with a move/shift combination. 7086 // It is not useful otherwise but is implemented for compatibility. 7087 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 7088 break; 7089 } else if (Subtarget->isThumb2()) { 7090 // A constant whose bitwise inverse can be used as an immediate 7091 // value in a data-processing instruction. This can be used in GCC 7092 // with a "B" modifier that prints the inverted value, for use with 7093 // BIC and MVN instructions. It is not useful otherwise but is 7094 // implemented for compatibility. 7095 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 7096 break; 7097 } else { 7098 // A constant whose bitwise inverse can be used as an immediate 7099 // value in a data-processing instruction. This can be used in GCC 7100 // with a "B" modifier that prints the inverted value, for use with 7101 // BIC and MVN instructions. It is not useful otherwise but is 7102 // implemented for compatibility. 7103 if (ARM_AM::getSOImmVal(~CVal) != -1) 7104 break; 7105 } 7106 return; 7107 7108 case 'L': 7109 if (Subtarget->isThumb1Only()) { 7110 // This must be a constant between -7 and 7, 7111 // for 3-operand ADD/SUB immediate instructions. 7112 if (CVal >= -7 && CVal < 7) 7113 break; 7114 } else if (Subtarget->isThumb2()) { 7115 // A constant whose negation can be used as an immediate value in a 7116 // data-processing instruction. 
This can be used in GCC with an "n" 7117 // modifier that prints the negated value, for use with SUB 7118 // instructions. It is not useful otherwise but is implemented for 7119 // compatibility. 7120 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 7121 break; 7122 } else { 7123 // A constant whose negation can be used as an immediate value in a 7124 // data-processing instruction. This can be used in GCC with an "n" 7125 // modifier that prints the negated value, for use with SUB 7126 // instructions. It is not useful otherwise but is implemented for 7127 // compatibility. 7128 if (ARM_AM::getSOImmVal(-CVal) != -1) 7129 break; 7130 } 7131 return; 7132 7133 case 'M': 7134 if (Subtarget->isThumb()) { // FIXME thumb2 7135 // This must be a multiple of 4 between 0 and 1020, for 7136 // ADD sp + immediate. 7137 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 7138 break; 7139 } else { 7140 // A power of two or a constant between 0 and 32. This is used in 7141 // GCC for the shift amount on shifted register operands, but it is 7142 // useful in general for any shift amounts. 7143 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 7144 break; 7145 } 7146 return; 7147 7148 case 'N': 7149 if (Subtarget->isThumb()) { // FIXME thumb2 7150 // This must be a constant between 0 and 31, for shift amounts. 7151 if (CVal >= 0 && CVal <= 31) 7152 break; 7153 } 7154 return; 7155 7156 case 'O': 7157 if (Subtarget->isThumb()) { // FIXME thumb2 7158 // This must be a multiple of 4 between -508 and 508, for 7159 // ADD/SUB sp = sp + immediate. 7160 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 7161 break; 7162 } 7163 return; 7164 } 7165 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 7166 break; 7167 } 7168 7169 if (Result.getNode()) { 7170 Ops.push_back(Result); 7171 return; 7172 } 7173 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 7174} 7175 7176bool 7177ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 7178 // The ARM target isn't yet aware of offsets. 7179 return false; 7180} 7181 7182int ARM::getVFPf32Imm(const APFloat &FPImm) { 7183 APInt Imm = FPImm.bitcastToAPInt(); 7184 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; 7185 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 7186 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits 7187 7188 // We can handle 4 bits of mantissa. 7189 // mantissa = (16+UInt(e:f:g:h))/16. 7190 if (Mantissa & 0x7ffff) 7191 return -1; 7192 Mantissa >>= 19; 7193 if ((Mantissa & 0xf) != Mantissa) 7194 return -1; 7195 7196 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 7197 if (Exp < -3 || Exp > 4) 7198 return -1; 7199 Exp = ((Exp+3) & 0x7) ^ 4; 7200 7201 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 7202} 7203 7204int ARM::getVFPf64Imm(const APFloat &FPImm) { 7205 APInt Imm = FPImm.bitcastToAPInt(); 7206 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; 7207 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 7208 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL; 7209 7210 // We can handle 4 bits of mantissa. 7211 // mantissa = (16+UInt(e:f:g:h))/16. 
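// Worked example: 1.0 has sign 0, an unbiased exponent of 0 and an all-zero
// fraction, so Mantissa reduces to 0, Exp becomes ((0+3) & 0x7) ^ 4 = 7,
// and the function returns the VMOV.F64 immediate encoding 0x70.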
7212 if (Mantissa & 0xffffffffffffLL) 7213 return -1; 7214 Mantissa >>= 48; 7215 if ((Mantissa & 0xf) != Mantissa) 7216 return -1; 7217 7218 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 7219 if (Exp < -3 || Exp > 4) 7220 return -1; 7221 Exp = ((Exp+3) & 0x7) ^ 4; 7222 7223 return ((int)Sign << 7) | (Exp << 4) | Mantissa; 7224} 7225 7226bool ARM::isBitFieldInvertedMask(unsigned v) { 7227 if (v == 0xffffffff) 7228 return false; 7229 // There can be 1s on either or both "outsides"; all the "inside" 7230 // bits must be 0s. 7231 unsigned int lsb = 0, msb = 31; 7232 while (v & (1 << msb)) --msb; 7233 while (v & (1 << lsb)) ++lsb; 7234 for (unsigned int i = lsb; i <= msb; ++i) { 7235 if (v & (1 << i)) 7236 return false; 7237 } 7238 return true; 7239} 7240 7241/// isFPImmLegal - Returns true if the target can instruction select the 7242/// specified FP immediate natively. If false, the legalizer will 7243/// materialize the FP immediate as a load from a constant pool. 7244bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 7245 if (!Subtarget->hasVFP3()) 7246 return false; 7247 if (VT == MVT::f32) 7248 return ARM::getVFPf32Imm(Imm) != -1; 7249 if (VT == MVT::f64) 7250 return ARM::getVFPf64Imm(Imm) != -1; 7251 return false; 7252} 7253 7254/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 7255/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 7256/// specified in the intrinsic calls. 7257bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 7258 const CallInst &I, 7259 unsigned Intrinsic) const { 7260 switch (Intrinsic) { 7261 case Intrinsic::arm_neon_vld1: 7262 case Intrinsic::arm_neon_vld2: 7263 case Intrinsic::arm_neon_vld3: 7264 case Intrinsic::arm_neon_vld4: 7265 case Intrinsic::arm_neon_vld2lane: 7266 case Intrinsic::arm_neon_vld3lane: 7267 case Intrinsic::arm_neon_vld4lane: { 7268 Info.opc = ISD::INTRINSIC_W_CHAIN; 7269 // Conservatively set memVT to the entire set of vectors loaded. 7270 uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; 7271 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 7272 Info.ptrVal = I.getArgOperand(0); 7273 Info.offset = 0; 7274 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 7275 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 7276 Info.vol = false; // volatile loads with NEON intrinsics not supported 7277 Info.readMem = true; 7278 Info.writeMem = false; 7279 return true; 7280 } 7281 case Intrinsic::arm_neon_vst1: 7282 case Intrinsic::arm_neon_vst2: 7283 case Intrinsic::arm_neon_vst3: 7284 case Intrinsic::arm_neon_vst4: 7285 case Intrinsic::arm_neon_vst2lane: 7286 case Intrinsic::arm_neon_vst3lane: 7287 case Intrinsic::arm_neon_vst4lane: { 7288 Info.opc = ISD::INTRINSIC_VOID; 7289 // Conservatively set memVT to the entire set of vectors stored.
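// For example, a vst3 of three <4 x i16> vectors contributes 8 bytes per
// vector argument in the loop below, giving NumElts = 3 and a 24-byte
// memVT of v3i64.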
7290 unsigned NumElts = 0; 7291 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 7292 const Type *ArgTy = I.getArgOperand(ArgI)->getType(); 7293 if (!ArgTy->isVectorTy()) 7294 break; 7295 NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; 7296 } 7297 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 7298 Info.ptrVal = I.getArgOperand(0); 7299 Info.offset = 0; 7300 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 7301 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 7302 Info.vol = false; // volatile stores with NEON intrinsics not supported 7303 Info.readMem = false; 7304 Info.writeMem = true; 7305 return true; 7306 } 7307 default: 7308 break; 7309 } 7310 7311 return false; 7312} 7313