ARMISelLowering.cpp revision 1cc3984148be113c6e5e470f23c9ddbd37679c5f
//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instruction.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
using namespace llvm;

static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions."),
  cl::init(false));

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State);
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State);
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State);
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State);

void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
                                       EVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());

    setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
                       PromotedLdStVT.getSimpleVT());
  }

  EVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
  if (ElemTy != MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
    setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
  if (llvm::ModelWithRegSequence())
    setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
  else
    setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
    setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::AND, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::OR,  VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::OR,  VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
    setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
    AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
                       PromotedBitwiseVT.getSimpleVT());
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
  setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
}

void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::DPRRegisterClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
  addRegisterClass(VT, ARM::QPRRegisterClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
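      // Editor's note: the condition codes registered below imply that each
      // __*vfp comparison helper returns a nonzero i32 when the tested
      // relation holds, so the libcall result is compared against zero
      // (SETNE for every predicate except O_F32/O_F64, which invert the
      // "unordered" helper's result with SETEQ).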
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  // Libcalls should use the AAPCS base standard ABI, even if hard float
  // is in effect, as per the ARM RTABI specification, section 4.1.2.
  if (Subtarget->isAAPCS_ABI()) {
    for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
                            CallingConv::ARM_AAPCS);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
  else
    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);

    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
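  // (Editor's note: the four flavors are pre-increment, pre-decrement,
  // post-increment, and post-decrement; Thumb1 load/store instructions have
  // no writeback forms, so on Thumb1-only targets the indexed modes are left
  // at their default Expand action by the guard below.)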
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL, MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  if (!Subtarget->hasDivide()) {
    // v7M has a hardware divider
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  // FIXME: Shouldn't need this, since no register is used, but the legalizer
  // doesn't yet know how to not do that for SjLj.
  setExceptionSelectorRegister(ARM::R0);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);

  // If the subtarget does not have extract instructions, sign_extend_inreg
  // needs to be expanded. Extract is available in ARM mode on v6 and up,
  // and on most Thumb2 implementations.
  if ((!Subtarget->isThumb() && !Subtarget->hasV6Ops())
      || (Subtarget->isThumb2() && !Subtarget->hasT2ExtractPack())) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  // Various VFP goodness
  if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  setSchedulingPreference(Sched::RegPressure);

  // FIXME: If-converter should use instruction latency to determine
  // profitability rather than relying on fixed limits.
  if (Subtarget->getCPUString() == "generic") {
    // Generic (and overly aggressive) if-conversion limits.
    setIfCvtBlockSizeLimit(10);
    setIfCvtDupBlockSizeLimit(2);
  } else if (Subtarget->hasV7Ops()) {
    setIfCvtBlockSizeLimit(3);
    setIfCvtDupBlockSizeLimit(1);
  } else if (Subtarget->hasV6Ops()) {
    setIfCvtBlockSizeLimit(2);
    setIfCvtDupBlockSizeLimit(1);
  } else {
    setIfCvtBlockSizeLimit(3);
    setIfCvtDupBlockSizeLimit(2);
  }

  maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
  // Do not enable CodePlacementOpt for now: it currently runs after the
  // ARMConstantIslandPass and messes up branch relaxation and placement
  // of constant islands.
  // benefitFromCodePlacementOpt = true;
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPZ:          return "ARMISD::CMPZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::RBIT:          return "ARMISD::RBIT";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER:    return "ARMISD::MEMBARRIER";
  case ARMISD::SYNCBARRIER:   return "ARMISD::SYNCBARRIER";

  case ARMISD::VCEQ:          return "ARMISD::VCEQ";
  case ARMISD::VCGE:          return "ARMISD::VCGE";
  case ARMISD::VCGEU:         return "ARMISD::VCGEU";
  case ARMISD::VCGT:          return "ARMISD::VCGT";
  case ARMISD::VCGTU:         return "ARMISD::VCGTU";
  case ARMISD::VTST:          return "ARMISD::VTST";

  case ARMISD::VSHL:          return "ARMISD::VSHL";
  case ARMISD::VSHRs:         return "ARMISD::VSHRs";
  case ARMISD::VSHRu:         return "ARMISD::VSHRu";
  case ARMISD::VSHLLs:        return "ARMISD::VSHLLs";
  case ARMISD::VSHLLu:        return "ARMISD::VSHLLu";
  case ARMISD::VSHLLi:        return "ARMISD::VSHLLi";
  case ARMISD::VSHRN:         return "ARMISD::VSHRN";
  case ARMISD::VRSHRs:        return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu:        return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN:        return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs:        return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu:        return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu:       return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs:       return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu:       return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu:      return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs:      return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu:      return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
  case ARMISD::VDUP:          return "ARMISD::VDUP";
  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
  case ARMISD::VEXT:          return "ARMISD::VEXT";
  case ARMISD::VREV64:        return "ARMISD::VREV64";
  case ARMISD::VREV32:        return "ARMISD::VREV32";
  case ARMISD::VREV16:        return "ARMISD::VREV16";
  case ARMISD::VZIP:          return "ARMISD::VZIP";
  case ARMISD::VUZP:          return "ARMISD::VUZP";
  case ARMISD::VTRN:          return "ARMISD::VTRN";
  case ARMISD::FMAX:          return "ARMISD::FMAX";
  case ARMISD::FMIN:          return "ARMISD::FMIN";
  }
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return ARM::QQPRRegisterClass;
    else if (VT == MVT::v8i64)
      return ARM::QQQQPRRegisterClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::Latency;
  }
  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
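/// Editor's note: some FP predicates (e.g. SETONE, SETUEQ) have no
/// single-condition encoding in the NZCV flags produced by a VFP compare, so
/// a second ARM condition is returned in CondCode2; it is left as ARMCC::AL
/// when only one test is needed.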
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

// APCS f64 is in register pairs, possibly split to stack
static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          CCState &State, bool CanFail) {
  static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else {
    // For the 2nd half of a v2f64, do not just fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList, 4))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State) {
  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

// AAPCS f64 is in aligned register pairs
static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                           CCValAssign::LocInfo &LocInfo,
                           CCState &State, bool CanFail) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0) {
    // For the 2nd half of a v2f64, do not just fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 8),
                                           LocVT, LocInfo));
    return true;
  }

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State) {
  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true;  // we handled it
}

static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                         CCValAssign::LocInfo &LocInfo, CCState &State) {
  static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
  static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
  if (Reg == 0)
    return false; // we didn't handle it

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  return true;  // we handled it
}

static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
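/// Editor's note: f64 and v2f64 results marked needsCustom() by the calling
/// convention arrive as pairs of i32 GPRs and are reassembled below with
/// ARMISD::VMOVDRR (plus INSERT_VECTOR_ELT for the two halves of a v2f64).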
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute.  The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          DebugLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       NULL, 0, NULL, 0);
}

/// LowerMemOpCallTo - Store the argument to the stack.
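/// Editor's note: byval arguments are copied with a memcpy node (see
/// CreateCopyOfByValArgument above) rather than with a plain store.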
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal()) {
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
  }
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      PseudoSourceValue::getStack(), LocMemOffset,
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVector<SDValue, 8> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool &isTailCall,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals) const {
  // ARM target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = Outs[realArgIdx].Val;
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
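    // (Editor's note: long calls load the callee's full address from the
    // constant pool and call through a register, so branch range is never a
    // concern.)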
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0,
                           false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0,
                           false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
                                                           ARMPCLabelIndex,
                                                           ARMCP::CPValue, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0,
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
                                                           Sym, ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           PseudoSourceValue::getConstantPool(), 0,
                           false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    CallOpc = (isDirect || Subtarget->hasV5TOps())
      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
      : ARMISD::CALL_NOLINK;
  }
  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
    // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
    InFlag = Chain.getValue(1);
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);
  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
                      &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
                         dl, DAG, InVals);
}

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = Outs[realRVLocIdx].Val;

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(1), Flag);
        Flag = Chain.getValue(1);
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are glued together, so that the
    // scheduler cannot separate them from each other or from the return.
    Flag = Chain.getValue(1);
  }

  SDValue result;
  if (Flag.getNode())
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
  else // Return Void
    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);

  return result;
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  DebugLoc dl = Op.getDebugLoc();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  DebugLoc DL = Op.getDebugLoc();
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
                                                         ARMCP::CPBlockAddress,
                                                         PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, false, 0);
  if (RelocM == Reloc::Static)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  DebugLoc dl = GA->getDebugLoc();
  EVT PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
                             ARMCP::CPValue, PCAdj, "tlsgd", true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
                         PseudoSourceValue::getConstantPool(), 0,
                         false, false, 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
                false, false, false, false,
                0, CallingConv::C, false, /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const {
  const GlobalValue *GV = GA->getGlobal();
  DebugLoc dl = GA->getDebugLoc();
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (GV->isDeclaration()) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
                               ARMCP::CPValue, PCAdj, "gottpoff", true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0,
                         false, false, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0,
                         false, false, 0);
  } else {
    // local exec model
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         PseudoSourceValue::getConstantPool(), 0,
                         false, false, 0);
  }

  // The address of the thread-local variable is the thread pointer plus the
  // offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG);
  else
    return LowerToTLSExecModels(GA, DAG);
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();
  DebugLoc dl = Op.getDebugLoc();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (RelocM == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
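    // (Editor's note: with GOTOFF the constant pool holds the symbol's offset
    // from the GOT base and no extra load is needed; with GOT it holds the
    // offset of a GOT slot whose contents must then be loaded -- see the
    // !UseGOTOFF load below.)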
"GOTOFF" : "GOT"); 1456 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1457 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1458 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 1459 CPAddr, 1460 PseudoSourceValue::getConstantPool(), 0, 1461 false, false, 0); 1462 SDValue Chain = Result.getValue(1); 1463 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1464 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 1465 if (!UseGOTOFF) 1466 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1467 PseudoSourceValue::getGOT(), 0, 1468 false, false, 0); 1469 return Result; 1470 } else { 1471 // If we have T2 ops, we can materialize the address directly via movt/movw 1472 // pair. This is always cheaper. 1473 if (Subtarget->useMovt()) { 1474 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 1475 DAG.getTargetGlobalAddress(GV, PtrVT)); 1476 } else { 1477 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1478 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1479 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1480 PseudoSourceValue::getConstantPool(), 0, 1481 false, false, 0); 1482 } 1483 } 1484} 1485 1486SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 1487 SelectionDAG &DAG) const { 1488 MachineFunction &MF = DAG.getMachineFunction(); 1489 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1490 unsigned ARMPCLabelIndex = 0; 1491 EVT PtrVT = getPointerTy(); 1492 DebugLoc dl = Op.getDebugLoc(); 1493 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 1494 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1495 SDValue CPAddr; 1496 if (RelocM == Reloc::Static) 1497 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 1498 else { 1499 ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1500 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 1501 ARMConstantPoolValue *CPV = 1502 new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj); 1503 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1504 } 1505 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1506 1507 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1508 PseudoSourceValue::getConstantPool(), 0, 1509 false, false, 0); 1510 SDValue Chain = Result.getValue(1); 1511 1512 if (RelocM == Reloc::PIC_) { 1513 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1514 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1515 } 1516 1517 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 1518 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 1519 PseudoSourceValue::getGOT(), 0, 1520 false, false, 0); 1521 1522 return Result; 1523} 1524 1525SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 1526 SelectionDAG &DAG) const { 1527 assert(Subtarget->isTargetELF() && 1528 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 1529 MachineFunction &MF = DAG.getMachineFunction(); 1530 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1531 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1532 EVT PtrVT = getPointerTy(); 1533 DebugLoc dl = Op.getDebugLoc(); 1534 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 1535 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(), 1536 "_GLOBAL_OFFSET_TABLE_", 1537 ARMPCLabelIndex, PCAdj); 1538 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1539 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1540 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1541 PseudoSourceValue::getConstantPool(), 0, 1542 false, false, 0); 1543 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1544 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1545} 1546 1547SDValue 1548ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 1549 const ARMSubtarget *Subtarget) 1550 const { 1551 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1552 DebugLoc dl = Op.getDebugLoc(); 1553 switch (IntNo) { 1554 default: return SDValue(); // Don't custom lower most intrinsics. 1555 case Intrinsic::arm_thread_pointer: { 1556 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1557 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 1558 } 1559 case Intrinsic::eh_sjlj_lsda: { 1560 MachineFunction &MF = DAG.getMachineFunction(); 1561 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1562 unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId(); 1563 EVT PtrVT = getPointerTy(); 1564 DebugLoc dl = Op.getDebugLoc(); 1565 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 1566 SDValue CPAddr; 1567 unsigned PCAdj = (RelocM != Reloc::PIC_) 1568 ? 0 : (Subtarget->isThumb() ? 4 : 8); 1569 ARMConstantPoolValue *CPV = 1570 new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex, 1571 ARMCP::CPLSDA, PCAdj); 1572 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 1573 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1574 SDValue Result = 1575 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 1576 PseudoSourceValue::getConstantPool(), 0, 1577 false, false, 0); 1578 SDValue Chain = Result.getValue(1); 1579 1580 if (RelocM == Reloc::PIC_) { 1581 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1582 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 1583 } 1584 return Result; 1585 } 1586 case Intrinsic::eh_sjlj_setjmp: 1587 SDValue Val = Subtarget->isThumb() ? 
      DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::SP, MVT::i32) :
      DAG.getConstant(0, MVT::i32);
    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1),
                       Val);
  }
}

static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *Subtarget) {
  DebugLoc dl = Op.getDebugLoc();
  SDValue Op5 = Op.getOperand(5);
  SDValue Res;
  unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
  if (isDeviceBarrier) {
    if (Subtarget->hasV7Ops())
      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
    else
      Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
                        DAG.getConstant(0, MVT::i32));
  } else {
    if (Subtarget->hasV7Ops())
      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
    else
      Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
                        DAG.getConstant(0, MVT::i32));
  }
  return Res;
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
                      false, false, 0);
}

SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
  unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
  if (AlignVal > StackAlign)
    // Do this now since the selection pass cannot introduce new
    // target-independent nodes.
    Align = DAG.getConstant(-(uint64_t)AlignVal, VT);

  // In Thumb1 mode, there isn't a "sub r, sp, r" instruction, so we will end
  // up using an "add r, sp, r" instead. Negate the size now so we don't have
  // to do an even more horrible hack later.
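  // A Thumb1 SP-adjusting SUB can encode a word-aligned immediate of at most
  // 508 (a 7-bit immediate scaled by 4), which is why constant sizes in that
  // range are left un-negated below.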
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (AFI->isThumb1OnlyFunction()) {
    bool Negate = true;
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
    if (C) {
      uint32_t Val = C->getZExtValue();
      if (Val <= 508 && ((Val & 3) == 0))
        Negate = false;
    }
    if (Negate)
      Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
  }

  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
  SDValue Ops1[] = { Chain, Size, Align };
  SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
  Chain = Res.getValue(1);
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                             DAG.getIntPtrConstant(0, true), SDValue());
  SDValue Ops2[] = { Res, Chain };
  return DAG.getMergeValues(Ops2, 2, dl);
}

SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        DebugLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = ARM::tGPRRegisterClass;
  else
    RC = ARM::GPRRegisterClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true, false);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            PseudoSourceValue::getFixedStack(FI), 0,
                            false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      SDValue ArgValue;
      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
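        // A v2f64 argument is reassembled from two f64 halves: each half
        // comes either from a GPR pair (via GetF64FormalArgument) or from a
        // fixed stack slot, and the halves are inserted into an undef v2f64.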
        if (VA.getLocVT() == MVT::v2f64) {
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(),
                                            true, false);
            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
                                    PseudoSourceValue::getFixedStack(FI), 0,
                                    false, false, 0);
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);

      } else {
        TargetRegisterClass *RC;

        if (RegVT == MVT::f32)
          RC = ARM::SPRRegisterClass;
        else if (RegVT == MVT::f64)
          RC = ARM::DPRRegisterClass;
        else if (RegVT == MVT::v2f64)
          RC = ARM::QPRRegisterClass;
        else if (RegVT == MVT::i32)
          RC = (AFI->isThumb1OnlyFunction() ?
                ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);

    } else { // !VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
                                      true, false);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                   PseudoSourceValue::getFixedStack(FI), 0,
                                   false, false, 0));
    }
  }

  // varargs
  if (isVarArg) {
    static const unsigned GPRArgRegs[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3
    };

    unsigned NumGPRs = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));

    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
    unsigned VARegSize = (4 - NumGPRs) * 4;
    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (VARegSaveSize) {
      // If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by dereferencing
      // the result of va_next.
      AFI->setVarArgsRegSaveSize(VARegSaveSize);
      AFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(VARegSaveSize,
                               ArgOffset + VARegSaveSize - VARegSize,
                               true, false));
      SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
                                      getPointerTy());

      SmallVector<SDValue, 4> MemOps;
      for (; NumGPRs < 4; ++NumGPRs) {
        TargetRegisterClass *RC;
        if (AFI->isThumb1OnlyFunction())
          RC = ARM::tGPRRegisterClass;
        else
          RC = ARM::GPRRegisterClass;

        unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
               PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()), 0,
               false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                          DAG.getConstant(4, getPointerTy()));
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else
      // This will point to the next argument passed via stack.
      AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset,
                                                       true, false));
  }

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  }
  return false;
}

/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                             SDValue &ARMCC, SelectionDAG &DAG,
                             DebugLoc dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C > 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C < 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMCC = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                         DebugLoc dl) {
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,
                       Cmp);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                               ARMCC, CCR, Cmp);
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
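    // Some FP predicates (e.g. the unordered ones) have no single ARM
    // condition code; FPCCToARMCC then returns a second code, and a second
    // conditional move predicated on it is chained onto the first result.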
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
                         Result, TrueVal, ARMCC2, CCR, Cmp2);
  }
  return Result;
}

SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  DebugLoc dl = Op.getDebugLoc();

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMCC;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMCC, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  if (CondCode2 != ARMCC::AL) {
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  EVT PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI, UId);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                       PseudoSourceValue::getJumpTable(), 0,
                       false, false, 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  } else {
    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
                       PseudoSourceValue::getJumpTable(), 0, false, false, 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  }
}

static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc;

  switch (Op.getOpcode()) {
  default:
    assert(0 && "Invalid opcode!");
  case ISD::FP_TO_SINT:
    Opc = ARMISD::FTOSI;
    break;
  case ISD::FP_TO_UINT:
    Opc = ARMISD::FTOUI;
    break;
  }
  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
}

static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  unsigned Opc;

  switch (Op.getOpcode()) {
  default:
    assert(0 && "Invalid opcode!");
  case ISD::SINT_TO_FP:
    Opc = ARMISD::SITOF;
    break;
  case ISD::UINT_TO_FP:
    Opc = ARMISD::UITOF;
    break;
  }

  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
  SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
    ? ARM::R7 : ARM::R11;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
                            false, false, 0);
  return FrameAddr;
}

/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
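/// For example, (f64 (bit_convert (i64 x))) becomes a VMOVDRR of the two i32
/// halves of x, and (i64 (bit_convert (f64 d))) becomes a BUILD_PAIR of the
/// two i32 results of VMOVRRD.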
static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  DebugLoc dl = N->getDebugLoc();
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
         "ExpandBIT_CONVERT called for non-i64 type");

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Zero vectors are used to represent vector negation and in those cases
  // will be implemented with the NEON VNEG instruction. However, VNEG does
  // not support i64 elements, so sometimes the zero vectors will need to be
  // explicitly constructed. For those cases, and potentially other uses in
  // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
  // to their dest type. This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
  SmallVector<SDValue, 8> Ops;
  MVT TVT;

  if (VT.getSizeInBits() == 64) {
    Ops.assign(8, Cst); TVT = MVT::v8i8;
  } else {
    Ops.assign(16, Cst); TVT = MVT::v16i8;
  }
  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
  // dest type. This ensures they get CSE'd.
  SDValue Vec;
  SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
  SmallVector<SDValue, 8> Ops;
  MVT TVT;

  if (VT.getSizeInBits() == 64) {
    Ops.assign(8, Cst); TVT = MVT::v8i8;
  } else {
    Ops.assign(16, Cst); TVT = MVT::v16i8;
  }
  Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());

  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
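/// Conceptually, for a shift amount "amt" the low result is
///   (ShOpLo >>u amt) | (ShOpHi << (32 - amt))   when amt < 32, and
///   ShOpHi >> (amt - 32)                        otherwise,
/// selected with a CMOV on the sign of (amt - 32); the high result is
/// simply ShOpHi shifted by amt.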
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMCC;
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMCC, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  DebugLoc dl = Op.getDebugLoc();
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMCC;

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMCC, DAG, dl);
  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  if (!ST->hasV6T2Ops())
    return SDValue();

  SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Lower vector shifts on NEON to use VSHL.
  if (VT.isVector()) {
    assert(ST->hasNEON() && "unexpected vector shift");

    // Left shifts translate directly to the vshiftu intrinsic.
    if (N->getOpcode() == ISD::SHL)
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
                         N->getOperand(0), N->getOperand(1));

    assert((N->getOpcode() == ISD::SRA ||
            N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

    // NEON uses the same intrinsics for both left and right shifts. For
    // right shifts, the shift amounts are negative, so negate the vector of
    // shift amounts.
    EVT ShiftVT = N->getOperand(1).getValueType();
    SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                       getZeroVector(ShiftVT, DAG, dl),
                                       N->getOperand(1));
    Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                               Intrinsic::arm_neon_vshifts :
                               Intrinsic::arm_neon_vshiftu);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(vshiftInt, MVT::i32),
                       N->getOperand(0), NegatedCount);
  }

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here; all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
    return SDValue();

  // If we are in Thumb1 mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  DebugLoc dl = Op.getDebugLoc();

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison"); break;
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison"); break;
    case ISD::SETNE:  Invert = true;
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true;
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true;
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {

      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
/// VMOV instruction, and if so, return the constant being splatted.
static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
                           unsigned SplatBitSize, SelectionDAG &DAG) {
  switch (SplatBitSize) {
  case 8:
    // Any 1-byte value is OK.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    return DAG.getTargetConstant(SplatBits, MVT::i8);

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i16);
    break;

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    if ((SplatBits & ~0xff) == 0 ||
        (SplatBits & ~0xff00) == 0 ||
        (SplatBits & ~0xff0000) == 0 ||
        (SplatBits & ~0xff000000) == 0)
      return DAG.getTargetConstant(SplatBits, MVT::i32);

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff)
      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32. A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat. But, then the
    // caller would also need to check and handle the change in size.
    break;

  case 64: {
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask)
        Val |= BitMask;
      else if ((SplatBits & BitMask) != 0)
        return SDValue();
      BitMask <<= 8;
    }
    return DAG.getTargetConstant(Val, MVT::i64);
  }

  default:
    llvm_unreachable("unexpected size for isVMOVSplat");
    break;
  }

  return SDValue();
}

/// getVMOVImm - If this is a build_vector of constants which can be
/// formed by using a VMOV instruction of the specified element size,
/// return the constant being splatted. The ByteSize field indicates the
/// number of bytes of each element [1248].
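/// For example, a <4 x i16> splat of 0x00ff qualifies for the 2-byte element
/// size, since only the low byte of each element is nonzero.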
SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                      HasAnyUndefs, ByteSize * 8))
    return SDValue();

  if (SplatBitSize > ByteSize * 8)
    return SDValue();

  return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                     SplatBitSize, DAG);
}

static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;
  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element. The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index. If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned) M[i] !=
        (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((unsigned) M[i] != i + WhichResult ||
        (unsigned) M[i+1] != i + NumElts + WhichResult)
      return false;
  }
  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((unsigned) M[i] != i + WhichResult ||
        (unsigned) M[i+1] != i + WhichResult)
      return false;
  }
  return true;
}

static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if ((unsigned) M[i] != 2 * i + WhichResult)
      return false;
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned Half = VT.getVectorNumElements() / 2;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned j = 0; j != 2; ++j) {
    unsigned Idx = WhichResult;
    for (unsigned i = 0; i != Half; ++i) {
      if ((unsigned) M[i + j * Half] != Idx)
        return false;
      Idx += 2;
    }
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
                       unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((unsigned) M[i] != Idx ||
        (unsigned) M[i+1] != Idx + NumElts)
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
                                unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((unsigned) M[i] != Idx ||
        (unsigned) M[i+1] != Idx)
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}


static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  // Canonicalize all-zeros and all-ones vectors.
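  // Routing these through getZeroVector/getOnesVector keeps every zero/ones
  // vector in the same canonical <8 x i8>/<16 x i8> form so the DAG can CSE
  // them.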
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
  if (ConstVal->isNullValue())
    return getZeroVector(VT, DAG, dl);
  if (ConstVal->isAllOnesValue())
    return getOnesVector(VT, DAG, dl);

  EVT CanonicalVT;
  if (VT.is64BitVector()) {
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v8i8; break;
    case 16: CanonicalVT = MVT::v4i16; break;
    case 32: CanonicalVT = MVT::v2i32; break;
    case 64: CanonicalVT = MVT::v1i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  } else {
    assert(VT.is128BitVector() && "unknown splat vector size");
    switch (Val.getValueType().getSizeInBits()) {
    case 8:  CanonicalVT = MVT::v16i8; break;
    case 16: CanonicalVT = MVT::v8i16; break;
    case 32: CanonicalVT = MVT::v4i32; break;
    case 64: CanonicalVT = MVT::v2i64; break;
    default: llvm_unreachable("unexpected splat element type"); break;
    }
  }

  // Build a canonical splat for this value.
  SmallVector<SDValue, 8> Ops;
  Ops.assign(CanonicalVT.getVectorNumElements(), Val);
  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
                            Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
                                SplatUndef.getZExtValue(), SplatBitSize, DAG);
      if (Val.getNode())
        return BuildSplat(Val, VT, DAG, dl);
    }
  }

  // If there are only 2 elements in a 128-bit vector, insert them into an
  // undef vector. This handles the common case for 128-bit vector argument
  // passing, where the insertions should be translated to subreg accesses
  // with no real instructions.
  if (VT.is128BitVector() && Op.getNumOperands() == 2) {
    SDValue Val = DAG.getUNDEF(VT);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
                        DAG.getIntPtrConstant(0));
    if (Op1.getOpcode() != ISD::UNDEF)
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
                        DAG.getIntPtrConstant(1));
    return Val;
  }

  return SDValue();
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
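    // Each of the four mask entries is a digit in [0,8] (8 encodes "undef"),
    // so the table index is formed in base 9: ((m0*9 + m1)*9 + m2)*9 + m3.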
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool ReverseVEXT;
  unsigned Imm, WhichResult;

  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
          isVREVMask(M, VT, 32) ||
          isVREVMask(M, VT, 16) ||
          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
          isVTRNMask(M, VT, WhichResult) ||
          isVUZPMask(M, VT, WhichResult) ||
          isVZIPMask(M, VT, WhichResult) ||
          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
          isVZIP_v_undef_Mask(M, VT, WhichResult));
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      DebugLoc dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VREV,
    OP_VDUP0,
    OP_VDUP1,
    OP_VDUP2,
    OP_VDUP3,
    OP_VEXT1,
    OP_VEXT2,
    OP_VEXT3,
    OP_VUZPL, // VUZP, left result
    OP_VUZPR, // VUZP, right result
    OP_VZIPL, // VZIP, left result
    OP_VZIPR, // VZIP, right result
    OP_VTRNL, // VTRN, left result
    OP_VTRNR  // VTRN, right result
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  SmallVector<int, 8> ShuffleMask;

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSE'd properly.
  SVN->getMask(ShuffleMask);

  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
    int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
    if (Lane == -1) Lane = 0;

    if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
    }
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                       DAG.getConstant(Lane, MVT::i32));
  }

  bool ReverseVEXT;
  unsigned Imm;
  if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
    if (ReverseVEXT)
      std::swap(V1, V2);
    return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                       DAG.getConstant(Imm, MVT::i32));
  }

  if (isVREVMask(ShuffleMask, VT, 64))
    return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
  if (isVREVMask(ShuffleMask, VT, 32))
    return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
  if (isVREVMask(ShuffleMask, VT, 16))
    return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

  // Check for NEON shuffles that modify both input vectors in place.
  // If both results are used, i.e., if there are two shuffles with the same
  // source operands and with masks corresponding to both results of one of
  // these operations, DAG memoization will ensure that a single node is
  // used for both shuffles.
  unsigned WhichResult;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       V1, V2).getValue(WhichResult);

  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       V1, V1).getValue(WhichResult);

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // v2f64 and v2i64 shuffles are just register copies.
  if (VT == MVT::v2f64 || VT == MVT::v2i64) {
    // Do the expansion as f64 since i64 is not legal.
    V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, V1);
    V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, V2);
    SDValue Val = DAG.getUNDEF(MVT::v2f64);
    for (unsigned i = 0; i < 2; ++i) {
      if (ShuffleMask[i] < 0)
        continue;
      SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                ShuffleMask[i] < 2 ? V1 : V2,
                                DAG.getConstant(ShuffleMask[i] & 1, MVT::i32));
      Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                        Elt, DAG.getConstant(i, MVT::i32));
    }
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
  }

  return SDValue();
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  SDValue Vec = Op.getOperand(0);
  SDValue Lane = Op.getOperand(1);
  assert(VT == MVT::i32 &&
         Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
         "unexpected type for custom-lowering vector extract");
  return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  DebugLoc dl = Op.getDebugLoc();
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (Op0.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0));
  if (Op1.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1));
  return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:
    return Subtarget->isTargetDarwin() ?
      LowerGlobalAddressDarwin(Op, DAG) : LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG);
  case ISD::MEMBARRIER:    return LowerMEMBARRIER(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    break;
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:          return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::VSETCC:        return LowerVSETCC(Op, DAG);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  }
  return SDValue();
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    break;
  case ISD::BIT_CONVERT:
    Res = ExpandBIT_CONVERT(N, DAG);
    break;
  case ISD::SRL:
  case ISD::SRA:
    Res = LowerShift(N, DAG, Subtarget);
    break;
  }
  if (Res.getNode())
    Results.push_back(Res);
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
                                     MachineBasicBlock *BB,
                                     unsigned Size) const {
  unsigned dest    = MI->getOperand(0).getReg();
  unsigned ptr     = MI->getOperand(1).getReg();
  unsigned oldval  = MI->getOperand(2).getReg();
  unsigned newval  = MI->getOperand(3).getReg();
  unsigned scratch = BB->getParent()->getRegInfo()
    .createVirtualRegister(ARM::GPRRegisterClass);
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();

  unsigned ldrOpc, strOpc;
  switch (Size) {
  default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
  case 1:
    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
    break;
  case 2:
    ldrOpc = isThumb2 ?
             ARM::t2LDREXH : ARM::LDREXH;
    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
    break;
  case 4:
    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
    strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
    break;
  }

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);
  exitMBB->transferSuccessors(BB);

  // thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  // loop1MBB:
  //   ldrex dest, [ptr]
  //   cmp dest, oldval
  //   bne exitMBB
  BB = loop1MBB;
  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
                 .addReg(dest).addReg(oldval));
  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
    .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  // loop2MBB:
  //   strex scratch, newval, [ptr]
  //   cmp scratch, #0
  //   bne loop1MBB
  BB = loop2MBB;
  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
                 .addReg(ptr));
  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
                 .addReg(scratch).addImm(0));
  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
    .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;

  MF->DeleteMachineInstr(MI);   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    unsigned Size, unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  DebugLoc dl = MI->getDebugLoc();

  bool isThumb2 = Subtarget->isThumb2();
  unsigned ldrOpc, strOpc;
  switch (Size) {
  default: llvm_unreachable("unsupported size for AtomicBinary!");
  case 1:
    ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
    break;
  case 2:
    ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
    strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
    break;
  case 4:
    ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
    strOpc = isThumb2 ?
               ARM::t2STREX : ARM::STREX;
    break;
  }

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);
  exitMBB->transferSuccessors(BB);

  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
  unsigned scratch2 = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister(ARM::GPRRegisterClass);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldrex dest, ptr
  //   <binop> scratch2, dest, incr
  //   strex scratch, scratch2, ptr
  //   cmp scratch, #0
  //   bne loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
  if (BinOpcode) {
    // operand order needs to go the other way for NAND
    if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
                     addReg(incr).addReg(dest)).addReg(0);
    else
      AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
                     addReg(dest).addReg(incr)).addReg(0);
  }

  AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
                 .addReg(ptr));
  AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
                 .addReg(scratch).addImm(0));
  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;

  MF->DeleteMachineInstr(MI);   // The instruction is gone now.

  return BB;
}
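// For a 32-bit atomic add on ARM, the loop emitted above assembles to
// roughly this (illustrative; actual register choices are up to the
// allocator):
//     .Lloop:
//       ldrex   r2, [r0]
//       add     r3, r2, r1
//       strex   r12, r3, [r0]
//       cmp     r12, #0
//       bne     .Lloop
// where r0 = ptr, r1 = incr, and r2 ends up holding the value loaded before
// the operation (the result of the atomic node).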
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI->getOpcode()) {
  default:
    MI->dump();
    llvm_unreachable("Unexpected instr type to insert");

  case ARM::ATOMIC_LOAD_ADD_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
  case ARM::ATOMIC_LOAD_ADD_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
  case ARM::ATOMIC_LOAD_ADD_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);

  case ARM::ATOMIC_LOAD_AND_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
  case ARM::ATOMIC_LOAD_AND_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
  case ARM::ATOMIC_LOAD_AND_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);

  case ARM::ATOMIC_LOAD_OR_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
  case ARM::ATOMIC_LOAD_OR_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
  case ARM::ATOMIC_LOAD_OR_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);

  case ARM::ATOMIC_LOAD_XOR_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
  case ARM::ATOMIC_LOAD_XOR_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
  case ARM::ATOMIC_LOAD_XOR_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);

  case ARM::ATOMIC_LOAD_NAND_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
  case ARM::ATOMIC_LOAD_NAND_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
  case ARM::ATOMIC_LOAD_NAND_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);

  case ARM::ATOMIC_LOAD_SUB_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
  case ARM::ATOMIC_LOAD_SUB_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
  case ARM::ATOMIC_LOAD_SUB_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);

  case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
  case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
  case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);

  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB  = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
           E = BB->succ_end(); I != E; ++I)
      sinkMBB->addSuccessor(*I);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::tANDsp:
  case ARM::tADDspr_:
  case ARM::tSUBspi_:
  case ARM::t2SUBrSPi_:
  case ARM::t2SUBrSPi12_:
  case ARM::t2SUBrSPs_: {
    MachineFunction *MF = BB->getParent();
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    bool DstIsDead = MI->getOperand(0).isDead();
    bool SrcIsKill = MI->getOperand(1).isKill();

    if (SrcReg != ARM::SP) {
      // Copy the source from the virtual register to SP.
      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
      unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
        ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
      BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
        .addReg(SrcReg, getKillRegState(SrcIsKill));
    }

    unsigned OpOpc = 0;
    bool NeedPred = false, NeedCC = false, NeedOp3 = false;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unexpected pseudo instruction!");
    case ARM::tANDsp:
      OpOpc = ARM::tAND;
      NeedPred = true;
      break;
    case ARM::tADDspr_:
      OpOpc = ARM::tADDspr;
      break;
    case ARM::tSUBspi_:
      OpOpc = ARM::tSUBspi;
      break;
    case ARM::t2SUBrSPi_:
      OpOpc = ARM::t2SUBrSPi;
      NeedPred = true; NeedCC = true;
      break;
    case ARM::t2SUBrSPi12_:
      OpOpc = ARM::t2SUBrSPi12;
      NeedPred = true;
      break;
    case ARM::t2SUBrSPs_:
      OpOpc = ARM::t2SUBrSPs;
      NeedPred = true; NeedCC = true; NeedOp3 = true;
      break;
    }
    MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
    if (OpOpc == ARM::tAND)
      AddDefaultT1CC(MIB);
    MIB.addReg(ARM::SP);
    MIB.addOperand(MI->getOperand(2));
    if (NeedOp3)
      MIB.addOperand(MI->getOperand(3));
    if (NeedPred)
      AddDefaultPred(MIB);
    if (NeedCC)
      AddDefaultCC(MIB);

    // Copy the result from SP to the virtual register.
    const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
    unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
      ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
    BuildMI(BB, dl, TII->get(CopyOpc))
      .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
      .addReg(ARM::SP);
    MF->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//
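// combineSelectAndUse folds a select of a zero constant into its user, so
// that only one branch of the select needs any arithmetic.  A sketch of the
// ADD case handled below:
//   (add (select cc, 0, c), x)  ->  (select cc, x, (add x, c))
// When the select takes the 0 branch, the add is a no-op; the add is then
// performed unconditionally and a conditional move picks x or x+c.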
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
  SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
  SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
  ISD::CondCode CC = ISD::SETCC_INVALID;

  if (isSlctCC) {
    CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
  } else {
    SDValue CCOp = Slct.getOperand(0);
    if (CCOp.getOpcode() == ISD::SETCC)
      CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
  }

  bool DoXform = false;
  bool InvCC = false;
  assert((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
         "Bad input!");

  if (LHS.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(LHS)->isNullValue()) {
    DoXform = true;
  } else if (CC != ISD::SETCC_INVALID &&
             RHS.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHS)->isNullValue()) {
    std::swap(LHS, RHS);
    SDValue Op0 = Slct.getOperand(0);
    EVT OpVT = isSlctCC ? Op0.getValueType() :
                          Op0.getOperand(0).getValueType();
    bool isInt = OpVT.isInteger();
    CC = ISD::getSetCCInverse(CC, isInt);

    if (!TLI.isCondCodeLegal(CC, OpVT))
      return SDValue();         // Inverse operator isn't legal.

    DoXform = true;
    InvCC = true;
  }

  if (DoXform) {
    SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
    if (isSlctCC)
      return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
                             Slct.getOperand(0), Slct.getOperand(1), CC);
    SDValue CCOp = Slct.getOperand(0);
    if (InvCC)
      CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
                          CCOp.getOperand(0), CCOp.getOperand(1), CC);
    return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
                       CCOp, OtherOp, Result);
  }
  return SDValue();
}

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // added by evan in r37685 with no testcase.
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
  if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
    if (Result.getNode()) return Result;
  }
  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // added by evan in r37685 with no testcase.
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
  if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}
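// PerformMULCombine rewrites a multiply by suitable constants into shifts
// plus an add or sub, which are cheaper than a mul on most ARM cores.  A
// worked example (illustrative): MulAmt = 72 has 3 trailing zero bits, so
// ShiftAmt = 3 and the remaining factor is 9 = 2^3 + 1, giving
//   (mul x, 72) => (shl (add (shl x, 3), x), 3)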
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DAG.getMachineFunction().
      getFunction()->hasFnAttr(Attribute::OptimizeForSize))
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  uint64_t MulAmt = C->getZExtValue();
  unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  DebugLoc DL = N->getDebugLoc();

  SDValue Res;
  MulAmt >>= ShiftAmt;
  if (isPowerOf2_32(MulAmt - 1)) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    Res = DAG.getNode(ISD::ADD, DL, VT,
                      V, DAG.getNode(ISD::SHL, DL, VT,
                                     V, DAG.getConstant(Log2_32(MulAmt-1),
                                                        MVT::i32)));
  } else if (isPowerOf2_32(MulAmt + 1)) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    Res = DAG.getNode(ISD::SUB, DL, VT,
                      DAG.getNode(ISD::SHL, DL, VT,
                                  V, DAG.getConstant(Log2_32(MulAmt+1),
                                                     MVT::i32)),
                      V);
  } else
    return SDValue();

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT, Res,
                      DAG.getConstant(ShiftAmt, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
  return SDValue();
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BIT_CONVERT)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
}
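// In the DAG, a NEON shift by immediate appears as a shift whose count
// operand is a constant splat build_vector, e.g. (illustrative):
//   shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
// getVShiftImm above recovers the scalar count (3) from that build_vector
// so the shift can be selected as a single VSHL with an immediate.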
/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the count must be negative.  The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (isIntrinsic)
    Cnt = -Cnt;
  return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized
  // to loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vshiftls:
  case Intrinsic::arm_neon_vshiftlu:
  case Intrinsic::arm_neon_vshiftn:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for vshll intrinsic");

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vshiftn:
    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vshiftls:
    case Intrinsic::arm_neon_vshiftlu:
      if (Cnt == VT.getVectorElementType().getSizeInBits())
        VShiftOpc = ARMISD::VSHLLi;
      else
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
                     ARMISD::VSHLLs : ARMISD::VSHLLu);
      break;
    case Intrinsic::arm_neon_vshiftn:
      VShiftOpc = ARMISD::VSHRN; break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);

  // Nothing to be done for scalar shifts.
  if (!VT.isVector())
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
                         DAG.getConstant(Cnt, MVT::i32));
    }
  }
  return SDValue();
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8-
  // and 16-bit vector elements.  NEON supports these directly.  They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType())) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
    }
  }

  return SDValue();
}

/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
/// to match f32 max/min patterns to use NEON vmax/vmin instructions.
static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  // If the target supports NEON, try to use vmax/vmin instructions for f32
  // selects like "x < y ? x : y".  Unless the FiniteOnlyFPMath option is set,
  // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is
  // a NaN; only do the transformation when it matches that behavior.

  // For now only do this when using NEON for FP operations; if using VFP, it
  // is not obvious that the benefit outweighs the cost of switching to the
  // NEON pipeline.
  if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
      N->getValueType(0) != MVT::f32)
    return SDValue();

  SDValue CondLHS = N->getOperand(0);
  SDValue CondRHS = N->getOperand(1);
  SDValue LHS = N->getOperand(2);
  SDValue RHS = N->getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();

  unsigned Opcode = 0;
  bool IsReversed;
  if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
    IsReversed = false; // x CC y ? x : y
  } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
    IsReversed = true ; // x CC y ? y : x
  } else {
    return SDValue();
  }

  bool IsUnordered;
  switch (CC) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETULT:
  case ISD::SETULE:
    // If LHS is NaN, an ordered comparison will be false and the result will
    // be the RHS, but vmin(NaN, RHS) = NaN.  Avoid this by checking that LHS
    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
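    // A concrete case (illustrative): for "x olt y ? x : y" with x = NaN,
    // SETOLT is false and the select yields y, whereas vmin would yield NaN,
    // so the fold is only safe when x is known not to be NaN.  For SETULT
    // the unordered compare is true when either operand is NaN, so there the
    // problematic operand is the RHS instead.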
    IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
      break;
    // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
    // will return -0, so vmin can only be used for unsafe math or if one of
    // the operands is known to be nonzero.
    if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
        !UnsafeFPMath &&
        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
      break;
    Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
    break;

  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    // If LHS is NaN, an ordered comparison will be false and the result will
    // be the RHS, but vmax(NaN, RHS) = NaN.  Avoid this by checking that LHS
    // != NaN.  Likewise, for unordered comparisons, check for RHS != NaN.
    IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
    if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
      break;
    // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
    // will return +0, so vmax can only be used for unsafe math or if one of
    // the operands is known to be nonzero.
    if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
        !UnsafeFPMath &&
        !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
      break;
    Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
    break;
  }

  if (!Opcode)
    return SDValue();
  return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
}

SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:        return PerformADDCombine(N, DCI);
  case ISD::SUB:        return PerformSUBCombine(N, DCI);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:        return PerformShiftCombine(N, DCI.DAG, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::SELECT_CC:  return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
  }
  return SDValue();
}

bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
  if (!Subtarget->hasV6Ops())
    // Pre-v6 does not support unaligned mem access.
    return false;
  else {
    // v6+ may or may not support unaligned mem access depending on the system
    // configuration.
    // FIXME: This is pretty conservative.  Should we provide a cmdline option
    // to control the behaviour?
    if (!Subtarget->isTargetDarwin())
      return false;
  }

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    return true;
  // FIXME: VLD1 etc. with standard alignment is legal.
  }
}

static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  case MVT::i32:
    // Scale == 4;
    Scale = 4;
    break;
  }

  if ((V & (Scale - 1)) != 0)
    return false;
  V /= Scale;
  return V == (V & ((1LL << 5) - 1));
}

static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  bool isNeg = false;
  if (V < 0) {
    isNeg = true;
    V = -V;
  }

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // + imm12 or - imm8
    if (isNeg)
      return V == (V & ((1LL << 8) - 1));
    return V == (V & ((1LL << 12) - 1));
  case MVT::f32:
  case MVT::f64:
    // Same as ARM mode. FIXME: NEON?
    if (!Subtarget->hasVFP2())
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = -V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2()) // FIXME: NEON?
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // r + r
    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations.  This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
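/// For example (illustrative): in ARM mode, "r + #4092" (base plus imm12)
/// and "r + (r << 2)" (base plus scaled register) are both legal for an i32
/// load, but a mode combining a scaled register with an immediate offset,
/// such as "r + (r << 2) + 8", is rejected below since ARM has no
/// R+R*scale+imm addressing.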
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  EVT VT = getValueType(Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb1Only())
      return false;
    // FALL THROUGH.
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r + r
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
    break;
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
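/// For example (illustrative): in ARM mode "cmp r0, #0xff00" is encodable
/// (an 8-bit value rotated by an even amount), while "cmp r0, #0x101" is
/// not and would first need the constant loaded into a register; Thumb1
/// only accepts immediates in the range 0-255.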
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  return Imm >= 0 && Imm <= 255;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bits, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - Returns true by value; sets the base pointer,
/// the offset, and the addressing mode by reference if the node's address
/// can be legally represented as a pre-indexed load / store address.
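/// For example (illustrative): a load from "add r0, r0, #4" can become
///   ldr r1, [r0, #4]!
/// which updates r0 to r0+4 before the access, saving the separate add.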
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT  = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - Returns true by value; sets the base pointer,
/// the offset, and the addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT  = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT  = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-indexed load / store when
    // it's legal.  In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
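    // For example (illustrative): if the two CMOV inputs are known to be
    // 0xF0 and 0x0F, then whichever way the condition resolves, bits [31:8]
    // of the result are known zero (the intersection of the two known-zero
    // sets), while no bit is known one because the low bytes disagree.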
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne  &= KnownOneRHS;
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
      if (Subtarget->isThumb())
        return std::make_pair(0U, ARM::tGPRRegisterClass);
      else
        return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, ARM::QPRRegisterClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(0U, ARM::CCRRegisterClass);

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 0);
  case 'r':
    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                   ARM::S12,ARM::S13,ARM::S14,ARM::S15,
                                   ARM::S16,ARM::S17,ARM::S18,ARM::S19,
                                   ARM::S20,ARM::S21,ARM::S22,ARM::S23,
                                   ARM::S24,ARM::S25,ARM::S26,ARM::S27,
                                   ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
    if (VT.getSizeInBits() == 64)
      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
                                   ARM::D8, ARM::D9, ARM::D10,ARM::D11,
                                   ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
    if (VT.getSizeInBits() == 128)
      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
    break;
  }

  return std::vector<unsigned>();
}
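// Example usage of these constraints from C (illustrative):
//   int r;
//   asm ("add %0, %1, %2" : "=l"(r) : "l"(a), "I"(255));
// "l" restricts operands to r0-r7 (required by most 16-bit Thumb encodings),
// and "I" accepts only constants usable as an immediate for the current
// mode, as validated in LowerAsmOperandForConstraint below.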
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     bool hasMemory,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (Constraint) {
    case 'I':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between 0 and 255, for ADD
        // immediates.
        if (CVal >= 0 && CVal <= 255)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getT2SOImmVal(CVal) != -1)
          break;
      } else {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getSOImmVal(CVal) != -1)
          break;
      }
      return;

    case 'J':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between -255 and -1, for negated ADD
        // immediates.  This can be used in GCC with an "n" modifier that
        // prints the negated value, for use with SUB instructions.  It is
        // not useful otherwise but is implemented for compatibility.
        if (CVal >= -255 && CVal <= -1)
          break;
      } else {
        // This must be a constant between -4095 and 4095.  It is not clear
        // what this constraint is intended for.  Implemented for
        // compatibility with GCC.
        if (CVal >= -4095 && CVal <= 4095)
          break;
      }
      return;

    case 'K':
      if (Subtarget->isThumb1Only()) {
        // A 32-bit value where only one byte has a nonzero value.  Exclude
        // zero to match GCC.  This constraint is used by GCC internally for
        // constants that can be loaded with a move/shift combination.
        // It is not useful otherwise but is implemented for compatibility.
        if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction.  This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions.  It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getT2SOImmVal(~CVal) != -1)
          break;
      } else {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction.  This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions.  It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getSOImmVal(~CVal) != -1)
          break;
      }
      return;

    case 'L':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between -7 and 7,
        // for 3-operand ADD/SUB immediate instructions.
      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal <= 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction.  This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions.  It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction.  This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions.  It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb()) { // FIXME thumb2
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
                                                      Ops, DAG);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

int ARM::getVFPf32Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}
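// A worked example of the f32 encoding above (editorial, not from the
// original source): +1.0f is 0x3f800000, so Sign == 0, the unbiased Exp is 0,
// and all mantissa bits are zero.  The exponent maps to
// ((0+3) & 0x7) ^ 4 == 7, yielding imm8 == (0 << 7) | (7 << 4) | 0 == 0x70,
// which the VFPv3 VMOV (immediate) expansion turns back into
// 2^0 * (16+0)/16 == 1.0.  A constant such as 0.1f has nonzero low mantissa
// bits, fails the first check, and returns -1.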
int ARM::getVFPf64Imm(const APFloat &FPImm) {
  APInt Imm = FPImm.bitcastToAPInt();
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffLL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively.  If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM::getVFPf32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM::getVFPf64Imm(Imm) != -1;
  return false;
}
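// Editorial note: when isFPImmLegal returns true, instruction selection can
// materialize the constant directly, e.g. "vmov.f32 s0, #1.0" (imm8 0x70) on
// a VFPv3 target, instead of the constant-pool load (a "vldr" of an .LCPI
// label) that the legalizer would otherwise emit.  The mnemonics are
// illustrative; actual output depends on the asm printer and subtarget.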