AArch64ISelLowering.cpp revision 97577757c6dc84233ad10cd432664257e593e76d
//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  if (Subtarget->isTargetLinux())
    return new AArch64LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  llvm_unreachable("unknown subtarget type");
}

AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    // And the vectors
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
  setTargetDAGCombine(ISD::OR);
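  // For example (an illustrative sketch, not a comment from this revision):
  // (or (and B, A), (and C, ~A)) selects each bit from B or C under mask A.
  // With a constant mask this is a candidate for a BFI bitfield insertion;
  // with a vector mask it is a candidate for a NEON BSL.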

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
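
  // (Illustrative note: A64 has no integer remainder instruction, so the REM
  // expansions above end up as a divide plus a multiply-subtract, roughly
  //     sdiv w2, w0, w1
  //     msub w0, w2, w1, w0
  // for a 32-bit srem.)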

  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
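
  // (Illustrative note: "Expand" on these transcendental operations means
  // they become libcalls, e.g. an f32 FSIN is selected as a call to sinf.)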

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most
  // cases.
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
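
  // (Illustrative note: the f128 cases of these conversions become libcalls
  // in the custom lowering, e.g. truncating f128 to f64 uses __trunctfdf2 and
  // f128 to i32 uses __fixtfsi, following the usual soft-float runtime
  // naming.)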

  // This prevents LLVM trying to compress double constants into a floating
  // constant-pool entry and trying to load from there. It's of doubtful
  // benefit for A64: we'd need LDR followed by FCVT, I believe.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
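
    // (Illustrative note: a vector SETCC here lowers to a NEON compare such
    // as FCMGT, whose per-lane all-ones/all-zeros result is exactly what
    // setBooleanVectorContents(ZeroOrNegativeOneBooleanContent) promised.)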

    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);

    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);

    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}
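
// (Illustrative note: Log2_32(Size) indexes the byte/hword/word/dword entry,
// so a 4-byte atomic at SequentiallyConsistent ordering selects the
// LDAXR_word / STLXR_word pair, while a Monotonic one gets the plain
// LDXR_word / STXR_word pair.)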

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

  // thisMBB:
  // ...
  // fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
  if (BinOpcode) {
    // All arithmetic operations we'll be creating are designed to take an
    // extra shift or extend operand, which we can conveniently set to zero.

    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}
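
// (Illustrative note: for an i32 "atomicrmw add ... seq_cst" this emits
// roughly
//   loop: ldaxr wDest, [xPtr]
//         add   wScratch, wDest, wIncr, lsl #0
//         stlxr wStatus, wScratch, [xPtr]
//         cbnz  wStatus, loop
// with the register names here invented purely for the sketch.)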

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  // thisMBB:
  // ...
  // fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}
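
// (Illustrative note: for ATOMIC_LOAD_MIN_I32 this is called with
// CmpOp = CMPww_lsl and Cond = GT, so the loop body becomes roughly
//   cmp  wIncr, wDest
//   csel wScratch, wDest, wIncr, gt
// i.e. keep the old value whenever incr > dest, which computes the minimum.)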

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // thisMBB:
  // ...
  // fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  // loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  // loop2MBB:
  //   strex stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement them).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.cc IfTrue
  //     b Done
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // IfTrue:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // Done:
  //     ldr qDEST, [sp]
  // [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}


const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
  case AArch64ISD::Call: return "AArch64ISD::Call";
  case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI: return "AArch64ISD::BFI";
  case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
  case AArch64ISD::Ret: return "AArch64ISD::Ret";
  case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";
  default:
    return NULL;
  }
}

static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}
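
// (Illustrative note: given arguments (i128, i64) where the i128 misses the
// last pair of GPRs and is assigned to the stack, rule C.11 means the
// following i64 must also go on the stack rather than into a still-free
// register; this hook enforces that by claiming every remaining GPR.)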

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {

  switch(CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // that class.
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}


SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isRegLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                   VA.getValVT(), ArgValue,
                                   DAG.getTargetConstant(DestSubReg, MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }
  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                    Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }
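
  // (Illustrative note: if our own incoming argument area is 32 bytes and
  // this tail call needs only 16 bytes of outgoing arguments, FPDiff is +16,
  // so the callee's stack arguments are stored 16 bytes into the area we are
  // about to reuse.)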

  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                    VA.getLocVT(),
                                    DAG.getUNDEF(VA.getLocVT()),
                                    Arg,
                                    DAG.getTargetConstant(SrcSubReg, MVT::i32)),
                    0);

      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down
      // because we want to copy things into registers as late as possible to
      // avoid register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing
      // are loaded before this eventual operation. Otherwise they'll be
      // clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }

  // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest
  // of the backend.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll
  // be in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // We produce the following DAG scheme for the actual call instruction:
  //   (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));


  // Add a register mask operand representing the call-preserved registers.
  // This is used later in codegen to constrain register-allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
1493 if (InFlag.getNode()) 1494 Ops.push_back(InFlag); 1495 1496 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1497 1498 if (IsTailCall) { 1499 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1500 } 1501 1502 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size()); 1503 InFlag = Chain.getValue(1); 1504 1505 // Now we can reclaim the stack, just as well do it before working out where 1506 // our return value is. 1507 if (!IsSibCall) { 1508 uint64_t CalleePopBytes 1509 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0; 1510 1511 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1512 DAG.getIntPtrConstant(CalleePopBytes, true), 1513 InFlag, dl); 1514 InFlag = Chain.getValue(1); 1515 } 1516 1517 return LowerCallResult(Chain, InFlag, CallConv, 1518 IsVarArg, Ins, dl, DAG, InVals); 1519} 1520 1521SDValue 1522AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1523 CallingConv::ID CallConv, bool IsVarArg, 1524 const SmallVectorImpl<ISD::InputArg> &Ins, 1525 SDLoc dl, SelectionDAG &DAG, 1526 SmallVectorImpl<SDValue> &InVals) const { 1527 // Assign locations to each value returned by this call. 1528 SmallVector<CCValAssign, 16> RVLocs; 1529 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 1530 getTargetMachine(), RVLocs, *DAG.getContext()); 1531 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv)); 1532 1533 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1534 CCValAssign VA = RVLocs[i]; 1535 1536 // Return values that are too big to fit into registers should use an sret 1537 // pointer, so this can be a lot simpler than the main argument code. 1538 assert(VA.isRegLoc() && "Memory locations not expected for call return"); 1539 1540 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1541 InFlag); 1542 Chain = Val.getValue(1); 1543 InFlag = Val.getValue(2); 1544 1545 switch (VA.getLocInfo()) { 1546 default: llvm_unreachable("Unknown loc info!"); 1547 case CCValAssign::Full: break; 1548 case CCValAssign::BCvt: 1549 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1550 break; 1551 case CCValAssign::ZExt: 1552 case CCValAssign::SExt: 1553 case CCValAssign::AExt: 1554 // Floating-point arguments only get extended/truncated if they're going 1555 // in memory, so using the integer operation is acceptable here. 1556 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 1557 break; 1558 } 1559 1560 InVals.push_back(Val); 1561 } 1562 1563 return Chain; 1564} 1565 1566bool 1567AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1568 CallingConv::ID CalleeCC, 1569 bool IsVarArg, 1570 bool IsCalleeStructRet, 1571 bool IsCallerStructRet, 1572 const SmallVectorImpl<ISD::OutputArg> &Outs, 1573 const SmallVectorImpl<SDValue> &OutVals, 1574 const SmallVectorImpl<ISD::InputArg> &Ins, 1575 SelectionDAG& DAG) const { 1576 1577 // For CallingConv::C this function knows whether the ABI needs 1578 // changing. That's not true for other conventions so they will have to opt in 1579 // manually. 1580 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1581 return false; 1582 1583 const MachineFunction &MF = DAG.getMachineFunction(); 1584 const Function *CallerF = MF.getFunction(); 1585 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1586 bool CCMatch = CallerCC == CalleeCC; 1587 1588 // Byval parameters hand the function a pointer directly into the stack area 1589 // we want to reuse during a tail call. 
Working around this *is* possible (see 1590 // X86) but less efficient and uglier in LowerCall. 1591 for (Function::const_arg_iterator i = CallerF->arg_begin(), 1592 e = CallerF->arg_end(); i != e; ++i) 1593 if (i->hasByValAttr()) 1594 return false; 1595 1596 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 1597 if (IsTailCallConvention(CalleeCC) && CCMatch) 1598 return true; 1599 return false; 1600 } 1601 1602 // Now we search for cases where we can use a tail call without changing the 1603 // ABI. Sibcall is used in some places (particularly gcc) to refer to this 1604 // concept. 1605 1606 // I want anyone implementing a new calling convention to think long and hard 1607 // about this assert. 1608 assert((!IsVarArg || CalleeCC == CallingConv::C) 1609 && "Unexpected variadic calling convention"); 1610 1611 if (IsVarArg && !Outs.empty()) { 1612 // At least two cases here: if caller is fastcc then we can't have any 1613 // memory arguments (we'd be expected to clean up the stack afterwards). If 1614 // caller is C then we could potentially use its argument area. 1615 1616 // FIXME: for now we take the most conservative of these in both cases: 1617 // disallow all variadic memory operands. 1618 SmallVector<CCValAssign, 16> ArgLocs; 1619 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1620 getTargetMachine(), ArgLocs, *DAG.getContext()); 1621 1622 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1623 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 1624 if (!ArgLocs[i].isRegLoc()) 1625 return false; 1626 } 1627 1628 // If the calling conventions do not match, then we'd better make sure the 1629 // results are returned in the same way as what the caller expects. 1630 if (!CCMatch) { 1631 SmallVector<CCValAssign, 16> RVLocs1; 1632 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1633 getTargetMachine(), RVLocs1, *DAG.getContext()); 1634 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC)); 1635 1636 SmallVector<CCValAssign, 16> RVLocs2; 1637 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1638 getTargetMachine(), RVLocs2, *DAG.getContext()); 1639 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC)); 1640 1641 if (RVLocs1.size() != RVLocs2.size()) 1642 return false; 1643 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1644 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1645 return false; 1646 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1647 return false; 1648 if (RVLocs1[i].isRegLoc()) { 1649 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1650 return false; 1651 } else { 1652 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1653 return false; 1654 } 1655 } 1656 } 1657 1658 // Nothing more to check if the callee is taking no arguments 1659 if (Outs.empty()) 1660 return true; 1661 1662 SmallVector<CCValAssign, 16> ArgLocs; 1663 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1664 getTargetMachine(), ArgLocs, *DAG.getContext()); 1665 1666 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1667 1668 const AArch64MachineFunctionInfo *FuncInfo 1669 = MF.getInfo<AArch64MachineFunctionInfo>(); 1670 1671 // If the stack arguments for this call would fit into our own save area then 1672 // the call can be made tail. 
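  // For example, a caller that itself received 16 bytes of incoming stack
  // arguments can tail-call any function needing at most 16 bytes of outgoing
  // stack arguments: the outgoing values are simply written over our own
  // incoming argument area.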
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}

SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps the legalizer find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each incoming stack-argument load that overlaps the
  // frame object being clobbered.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
       UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
                     &ArgChains[0], ArgChains.size());
}

static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return A64CC::EQ;
  case ISD::SETGT:  return A64CC::GT;
  case ISD::SETGE:  return A64CC::GE;
  case ISD::SETLT:  return A64CC::LT;
  case ISD::SETLE:  return A64CC::LE;
  case ISD::SETNE:  return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}

bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
  // icmp is implemented using adds/subs immediate, which take an unsigned
  // 12-bit immediate, optionally shifted left by 12 bits.

  // The range is symmetric: a negative value is handled by negating it and
  // using the other of adds/subs.
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}

SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
                                        ISD::CondCode CC, SDValue &A64cc,
                                        SelectionDAG &DAG, SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly,
    // but we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
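      // (isLegalICmpImmediate only accepts a 12-bit value, optionally shifted
      // left by 12 bits -- e.g. 0xfff or 0xabc000 -- so nothing this large
      // can be adjusted into range.)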
1760 knownInvalid = true; 1761 } else { 1762 C = RHSC->getZExtValue(); 1763 } 1764 1765 if (!knownInvalid && !isLegalICmpImmediate(C)) { 1766 // Constant does not fit, try adjusting it by one? 1767 switch (CC) { 1768 default: break; 1769 case ISD::SETLT: 1770 case ISD::SETGE: 1771 if (isLegalICmpImmediate(C-1)) { 1772 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 1773 RHS = DAG.getConstant(C-1, VT); 1774 } 1775 break; 1776 case ISD::SETULT: 1777 case ISD::SETUGE: 1778 if (isLegalICmpImmediate(C-1)) { 1779 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 1780 RHS = DAG.getConstant(C-1, VT); 1781 } 1782 break; 1783 case ISD::SETLE: 1784 case ISD::SETGT: 1785 if (isLegalICmpImmediate(C+1)) { 1786 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 1787 RHS = DAG.getConstant(C+1, VT); 1788 } 1789 break; 1790 case ISD::SETULE: 1791 case ISD::SETUGT: 1792 if (isLegalICmpImmediate(C+1)) { 1793 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 1794 RHS = DAG.getConstant(C+1, VT); 1795 } 1796 break; 1797 } 1798 } 1799 } 1800 1801 A64CC::CondCodes CondCode = IntCCToA64CC(CC); 1802 A64cc = DAG.getConstant(CondCode, MVT::i32); 1803 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1804 DAG.getCondCode(CC)); 1805} 1806 1807static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, 1808 A64CC::CondCodes &Alternative) { 1809 A64CC::CondCodes CondCode = A64CC::Invalid; 1810 Alternative = A64CC::Invalid; 1811 1812 switch (CC) { 1813 default: llvm_unreachable("Unknown FP condition!"); 1814 case ISD::SETEQ: 1815 case ISD::SETOEQ: CondCode = A64CC::EQ; break; 1816 case ISD::SETGT: 1817 case ISD::SETOGT: CondCode = A64CC::GT; break; 1818 case ISD::SETGE: 1819 case ISD::SETOGE: CondCode = A64CC::GE; break; 1820 case ISD::SETOLT: CondCode = A64CC::MI; break; 1821 case ISD::SETOLE: CondCode = A64CC::LS; break; 1822 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break; 1823 case ISD::SETO: CondCode = A64CC::VC; break; 1824 case ISD::SETUO: CondCode = A64CC::VS; break; 1825 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break; 1826 case ISD::SETUGT: CondCode = A64CC::HI; break; 1827 case ISD::SETUGE: CondCode = A64CC::PL; break; 1828 case ISD::SETLT: 1829 case ISD::SETULT: CondCode = A64CC::LT; break; 1830 case ISD::SETLE: 1831 case ISD::SETULE: CondCode = A64CC::LE; break; 1832 case ISD::SETNE: 1833 case ISD::SETUNE: CondCode = A64CC::NE; break; 1834 } 1835 return CondCode; 1836} 1837 1838SDValue 1839AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 1840 SDLoc DL(Op); 1841 EVT PtrVT = getPointerTy(); 1842 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1843 1844 switch(getTargetMachine().getCodeModel()) { 1845 case CodeModel::Small: 1846 // The most efficient code is PC-relative anyway for the small memory model, 1847 // so we don't need to worry about relocation model. 
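    // The WrapperSmall node below is expected to select to the usual
    // small-model ADRP/ADD pair, along the lines of (illustrative):
    //     adrp x0, .Ltmp
    //     add  x0, x0, #:lo12:.Ltmp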
1848 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 1849 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1850 AArch64II::MO_NO_FLAG), 1851 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1852 AArch64II::MO_LO12), 1853 DAG.getConstant(/*Alignment=*/ 4, MVT::i32)); 1854 case CodeModel::Large: 1855 return DAG.getNode( 1856 AArch64ISD::WrapperLarge, DL, PtrVT, 1857 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3), 1858 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 1859 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 1860 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 1861 default: 1862 llvm_unreachable("Only small and large code models supported now"); 1863 } 1864} 1865 1866 1867// (BRCOND chain, val, dest) 1868SDValue 1869AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 1870 SDLoc dl(Op); 1871 SDValue Chain = Op.getOperand(0); 1872 SDValue TheBit = Op.getOperand(1); 1873 SDValue DestBB = Op.getOperand(2); 1874 1875 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 1876 // that as the consumer we are responsible for ignoring rubbish in higher 1877 // bits. 1878 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 1879 DAG.getConstant(1, MVT::i32)); 1880 1881 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 1882 DAG.getConstant(0, TheBit.getValueType()), 1883 DAG.getCondCode(ISD::SETNE)); 1884 1885 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain, 1886 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32), 1887 DestBB); 1888} 1889 1890// (BR_CC chain, condcode, lhs, rhs, dest) 1891SDValue 1892AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1893 SDLoc dl(Op); 1894 SDValue Chain = Op.getOperand(0); 1895 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1896 SDValue LHS = Op.getOperand(2); 1897 SDValue RHS = Op.getOperand(3); 1898 SDValue DestBB = Op.getOperand(4); 1899 1900 if (LHS.getValueType() == MVT::f128) { 1901 // f128 comparisons are lowered to runtime calls by a routine which sets 1902 // LHS, RHS and CC appropriately for the rest of this function to continue. 1903 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 1904 1905 // If softenSetCCOperands returned a scalar, we need to compare the result 1906 // against zero to select between true and false values. 1907 if (RHS.getNode() == 0) { 1908 RHS = DAG.getConstant(0, LHS.getValueType()); 1909 CC = ISD::SETNE; 1910 } 1911 } 1912 1913 if (LHS.getValueType().isInteger()) { 1914 SDValue A64cc; 1915 1916 // Integers are handled in a separate function because the combinations of 1917 // immediates and tests can get hairy and we may want to fiddle things. 1918 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 1919 1920 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1921 Chain, CmpOp, A64cc, DestBB); 1922 } 1923 1924 // Note that some LLVM floating-point CondCodes can't be lowered to a single 1925 // conditional branch, hence FPCCToA64CC can set a second test, where either 1926 // passing is sufficient. 
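  // For example, SETONE has no single A64 equivalent: FPCCToA64CC returns MI
  // (covering the less-than half) with GT as the Alternative, and we emit two
  // conditional branches to the same destination. An unordered comparison
  // fails both tests, as required.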
1927 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 1928 CondCode = FPCCToA64CC(CC, Alternative); 1929 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 1930 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1931 DAG.getCondCode(CC)); 1932 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1933 Chain, SetCC, A64cc, DestBB); 1934 1935 if (Alternative != A64CC::Invalid) { 1936 A64cc = DAG.getConstant(Alternative, MVT::i32); 1937 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1938 A64BR_CC, SetCC, A64cc, DestBB); 1939 1940 } 1941 1942 return A64BR_CC; 1943} 1944 1945SDValue 1946AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG, 1947 RTLIB::Libcall Call) const { 1948 ArgListTy Args; 1949 ArgListEntry Entry; 1950 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) { 1951 EVT ArgVT = Op.getOperand(i).getValueType(); 1952 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1953 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy; 1954 Entry.isSExt = false; 1955 Entry.isZExt = false; 1956 Args.push_back(Entry); 1957 } 1958 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy()); 1959 1960 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 1961 1962 // By default, the input chain to this libcall is the entry node of the 1963 // function. If the libcall is going to be emitted as a tail call then 1964 // isUsedByReturnOnly will change it to the right chain if the return 1965 // node which is being folded has a non-entry input chain. 1966 SDValue InChain = DAG.getEntryNode(); 1967 1968 // isTailCall may be true since the callee does not reference caller stack 1969 // frame. Check if it's in the right position. 1970 SDValue TCChain = InChain; 1971 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain); 1972 if (isTailCall) 1973 InChain = TCChain; 1974 1975 TargetLowering:: 1976 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false, 1977 0, getLibcallCallingConv(Call), isTailCall, 1978 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1979 Callee, Args, DAG, SDLoc(Op)); 1980 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 1981 1982 if (!CallInfo.second.getNode()) 1983 // It's a tailcall, return the chain (which is the DAG root). 
    return DAG.getRoot();

  return CallInfo.first;
}

SDValue
AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
                     /*isSigned*/ false, SDLoc(Op)).first;
}

SDValue
AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}

SDValue
AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}

SDValue
AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, MVT::i64);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return X30, which contains the return address. Mark it as an implicit
  // live-in.
2048 unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64)); 2049 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64); 2050} 2051 2052 2053SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) 2054 const { 2055 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2056 MFI->setFrameAddressIsTaken(true); 2057 2058 EVT VT = Op.getValueType(); 2059 SDLoc dl(Op); 2060 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2061 unsigned FrameReg = AArch64::X29; 2062 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2063 while (Depth--) 2064 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2065 MachinePointerInfo(), 2066 false, false, false, 0); 2067 return FrameAddr; 2068} 2069 2070SDValue 2071AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, 2072 SelectionDAG &DAG) const { 2073 assert(getTargetMachine().getCodeModel() == CodeModel::Large); 2074 assert(getTargetMachine().getRelocationModel() == Reloc::Static); 2075 2076 EVT PtrVT = getPointerTy(); 2077 SDLoc dl(Op); 2078 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2079 const GlobalValue *GV = GN->getGlobal(); 2080 2081 SDValue GlobalAddr = DAG.getNode( 2082 AArch64ISD::WrapperLarge, dl, PtrVT, 2083 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3), 2084 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 2085 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 2086 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 2087 2088 if (GN->getOffset() != 0) 2089 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2090 DAG.getConstant(GN->getOffset(), PtrVT)); 2091 2092 return GlobalAddr; 2093} 2094 2095SDValue 2096AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op, 2097 SelectionDAG &DAG) const { 2098 assert(getTargetMachine().getCodeModel() == CodeModel::Small); 2099 2100 EVT PtrVT = getPointerTy(); 2101 SDLoc dl(Op); 2102 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2103 const GlobalValue *GV = GN->getGlobal(); 2104 unsigned Alignment = GV->getAlignment(); 2105 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2106 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) { 2107 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate 2108 // to zero when they remain undefined. In PIC mode the GOT can take care of 2109 // this, but in absolute mode we use a constant pool load. 
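    // The expected materialization is the usual small-model pair, but loading
    // from a constant-pool entry that holds the symbol's address, roughly:
    //     adrp x0, .LCPIn
    //     ldr  x0, [x0, #:lo12:.LCPIn]
    // (label name illustrative).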
2110 SDValue PoolAddr; 2111 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2112 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2113 AArch64II::MO_NO_FLAG), 2114 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2115 AArch64II::MO_LO12), 2116 DAG.getConstant(8, MVT::i32)); 2117 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr, 2118 MachinePointerInfo::getConstantPool(), 2119 /*isVolatile=*/ false, 2120 /*isNonTemporal=*/ true, 2121 /*isInvariant=*/ true, 8); 2122 if (GN->getOffset() != 0) 2123 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2124 DAG.getConstant(GN->getOffset(), PtrVT)); 2125 2126 return GlobalAddr; 2127 } 2128 2129 if (Alignment == 0) { 2130 const PointerType *GVPtrTy = cast<PointerType>(GV->getType()); 2131 if (GVPtrTy->getElementType()->isSized()) { 2132 Alignment 2133 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType()); 2134 } else { 2135 // Be conservative if we can't guess, not that it really matters: 2136 // functions and labels aren't valid for loads, and the methods used to 2137 // actually calculate an address work with any alignment. 2138 Alignment = 1; 2139 } 2140 } 2141 2142 unsigned char HiFixup, LoFixup; 2143 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM); 2144 2145 if (UseGOT) { 2146 HiFixup = AArch64II::MO_GOT; 2147 LoFixup = AArch64II::MO_GOT_LO12; 2148 Alignment = 8; 2149 } else { 2150 HiFixup = AArch64II::MO_NO_FLAG; 2151 LoFixup = AArch64II::MO_LO12; 2152 } 2153 2154 // AArch64's small model demands the following sequence: 2155 // ADRP x0, somewhere 2156 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly). 2157 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2158 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2159 HiFixup), 2160 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2161 LoFixup), 2162 DAG.getConstant(Alignment, MVT::i32)); 2163 2164 if (UseGOT) { 2165 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(), 2166 GlobalRef); 2167 } 2168 2169 if (GN->getOffset() != 0) 2170 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef, 2171 DAG.getConstant(GN->getOffset(), PtrVT)); 2172 2173 return GlobalRef; 2174} 2175 2176SDValue 2177AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, 2178 SelectionDAG &DAG) const { 2179 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so 2180 // we make those distinctions here. 2181 2182 switch (getTargetMachine().getCodeModel()) { 2183 case CodeModel::Small: 2184 return LowerGlobalAddressELFSmall(Op, DAG); 2185 case CodeModel::Large: 2186 return LowerGlobalAddressELFLarge(Op, DAG); 2187 default: 2188 llvm_unreachable("Only small and large code models supported now"); 2189 } 2190} 2191 2192SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, 2193 SDValue DescAddr, 2194 SDLoc DL, 2195 SelectionDAG &DAG) const { 2196 EVT PtrVT = getPointerTy(); 2197 2198 // The function we need to call is simply the first entry in the GOT for this 2199 // descriptor, load it in preparation. 2200 SDValue Func, Chain; 2201 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2202 DescAddr); 2203 2204 // The function takes only one argument: the address of the descriptor itself 2205 // in X0. 2206 SDValue Glue; 2207 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue); 2208 Glue = Chain.getValue(1); 2209 2210 // Finally, there's a special calling-convention which means that the lookup 2211 // must preserve all registers (except X0, obviously). 
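  // For reference, the sequence this is expected to become is approximately
  // (the exact form is up to the pseudo-expansion and linker relaxation):
  //     adrp  x0, :tlsdesc:var
  //     ldr   x1, [x0, #:tlsdesc_lo12:var]
  //     add   x0, x0, #:tlsdesc_lo12:var
  //     .tlsdesccall var
  //     blr   x1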
2212 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2213 const AArch64RegisterInfo *A64RI 2214 = static_cast<const AArch64RegisterInfo *>(TRI); 2215 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask(); 2216 2217 // We're now ready to populate the argument list, as with a normal call: 2218 std::vector<SDValue> Ops; 2219 Ops.push_back(Chain); 2220 Ops.push_back(Func); 2221 Ops.push_back(SymAddr); 2222 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT)); 2223 Ops.push_back(DAG.getRegisterMask(Mask)); 2224 Ops.push_back(Glue); 2225 2226 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2227 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], 2228 Ops.size()); 2229 Glue = Chain.getValue(1); 2230 2231 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it 2232 // back to the generic handling code. 2233 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); 2234} 2235 2236SDValue 2237AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, 2238 SelectionDAG &DAG) const { 2239 assert(getSubtarget()->isTargetELF() && 2240 "TLS not implemented for non-ELF targets"); 2241 assert(getTargetMachine().getCodeModel() == CodeModel::Small 2242 && "TLS only supported in small memory model"); 2243 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2244 2245 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); 2246 2247 SDValue TPOff; 2248 EVT PtrVT = getPointerTy(); 2249 SDLoc DL(Op); 2250 const GlobalValue *GV = GA->getGlobal(); 2251 2252 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); 2253 2254 if (Model == TLSModel::InitialExec) { 2255 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2256 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2257 AArch64II::MO_GOTTPREL), 2258 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2259 AArch64II::MO_GOTTPREL_LO12), 2260 DAG.getConstant(8, MVT::i32)); 2261 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2262 TPOff); 2263 } else if (Model == TLSModel::LocalExec) { 2264 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2265 AArch64II::MO_TPREL_G1); 2266 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2267 AArch64II::MO_TPREL_G0_NC); 2268 2269 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2270 DAG.getTargetConstant(1, MVT::i32)), 0); 2271 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2272 TPOff, LoVar, 2273 DAG.getTargetConstant(0, MVT::i32)), 0); 2274 } else if (Model == TLSModel::GeneralDynamic) { 2275 // Accesses used in this sequence go via the TLS descriptor which lives in 2276 // the GOT. Prepare an address we can use to handle this. 2277 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2278 AArch64II::MO_TLSDESC); 2279 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2280 AArch64II::MO_TLSDESC_LO12); 2281 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2282 HiDesc, LoDesc, 2283 DAG.getConstant(8, MVT::i32)); 2284 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0); 2285 2286 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2287 } else if (Model == TLSModel::LocalDynamic) { 2288 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS 2289 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate 2290 // the beginning of the module's TLS region, followed by a DTPREL offset 2291 // calculation. 
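  // The DTPREL offset itself is materialized below with a MOVZ/MOVK pair,
  // roughly:
  //     movz xN, #:dtprel_g1:var
  //     movk xN, #:dtprel_g0_nc:var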
2292 2293 // These accesses will need deduplicating if there's more than one. 2294 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction() 2295 .getInfo<AArch64MachineFunctionInfo>(); 2296 MFI->incNumLocalDynamicTLSAccesses(); 2297 2298 2299 // Get the location of _TLS_MODULE_BASE_: 2300 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2301 AArch64II::MO_TLSDESC); 2302 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2303 AArch64II::MO_TLSDESC_LO12); 2304 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2305 HiDesc, LoDesc, 2306 DAG.getConstant(8, MVT::i32)); 2307 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); 2308 2309 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2310 2311 // Get the variable's offset from _TLS_MODULE_BASE_ 2312 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2313 AArch64II::MO_DTPREL_G1); 2314 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2315 AArch64II::MO_DTPREL_G0_NC); 2316 2317 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2318 DAG.getTargetConstant(0, MVT::i32)), 0); 2319 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2320 TPOff, LoVar, 2321 DAG.getTargetConstant(0, MVT::i32)), 0); 2322 } else 2323 llvm_unreachable("Unsupported TLS access model"); 2324 2325 2326 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); 2327} 2328 2329SDValue 2330AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2331 bool IsSigned) const { 2332 if (Op.getValueType() != MVT::f128) { 2333 // Legal for everything except f128. 2334 return Op; 2335 } 2336 2337 RTLIB::Libcall LC; 2338 if (IsSigned) 2339 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2340 else 2341 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2342 2343 return LowerF128ToCall(Op, DAG, LC); 2344} 2345 2346 2347SDValue 2348AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2349 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2350 SDLoc dl(JT); 2351 EVT PtrVT = getPointerTy(); 2352 2353 // When compiling PIC, jump tables get put in the code section so a static 2354 // relocation-style is acceptable for both cases. 
2355 switch (getTargetMachine().getCodeModel()) { 2356 case CodeModel::Small: 2357 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2358 DAG.getTargetJumpTable(JT->getIndex(), PtrVT), 2359 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2360 AArch64II::MO_LO12), 2361 DAG.getConstant(1, MVT::i32)); 2362 case CodeModel::Large: 2363 return DAG.getNode( 2364 AArch64ISD::WrapperLarge, dl, PtrVT, 2365 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3), 2366 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC), 2367 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC), 2368 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC)); 2369 default: 2370 llvm_unreachable("Only small and large code models supported now"); 2371 } 2372} 2373 2374// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) 2375SDValue 2376AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2377 SDLoc dl(Op); 2378 SDValue LHS = Op.getOperand(0); 2379 SDValue RHS = Op.getOperand(1); 2380 SDValue IfTrue = Op.getOperand(2); 2381 SDValue IfFalse = Op.getOperand(3); 2382 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2383 2384 if (LHS.getValueType() == MVT::f128) { 2385 // f128 comparisons are lowered to libcalls, but slot in nicely here 2386 // afterwards. 2387 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2388 2389 // If softenSetCCOperands returned a scalar, we need to compare the result 2390 // against zero to select between true and false values. 2391 if (RHS.getNode() == 0) { 2392 RHS = DAG.getConstant(0, LHS.getValueType()); 2393 CC = ISD::SETNE; 2394 } 2395 } 2396 2397 if (LHS.getValueType().isInteger()) { 2398 SDValue A64cc; 2399 2400 // Integers are handled in a separate function because the combinations of 2401 // immediates and tests can get hairy and we may want to fiddle things. 2402 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2403 2404 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2405 CmpOp, IfTrue, IfFalse, A64cc); 2406 } 2407 2408 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2409 // conditional branch, hence FPCCToA64CC can set a second test, where either 2410 // passing is sufficient. 2411 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2412 CondCode = FPCCToA64CC(CC, Alternative); 2413 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2414 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2415 DAG.getCondCode(CC)); 2416 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, 2417 Op.getValueType(), 2418 SetCC, IfTrue, IfFalse, A64cc); 2419 2420 if (Alternative != A64CC::Invalid) { 2421 A64cc = DAG.getConstant(Alternative, MVT::i32); 2422 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2423 SetCC, IfTrue, A64SELECT_CC, A64cc); 2424 2425 } 2426 2427 return A64SELECT_CC; 2428} 2429 2430// (SELECT testbit, iftrue, iffalse) 2431SDValue 2432AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2433 SDLoc dl(Op); 2434 SDValue TheBit = Op.getOperand(0); 2435 SDValue IfTrue = Op.getOperand(1); 2436 SDValue IfFalse = Op.getOperand(2); 2437 2438 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 2439 // that as the consumer we are responsible for ignoring rubbish in higher 2440 // bits. 
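  // Only bit 0 of TheBit is meaningful here, so mask the rest off before
  // comparing against zero: e.g. an i1 "false" that arrives as 0xfffffffe
  // (rubbish in bits 31:1) must still select IfFalse.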
2441 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 2442 DAG.getConstant(1, MVT::i32)); 2443 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 2444 DAG.getConstant(0, TheBit.getValueType()), 2445 DAG.getCondCode(ISD::SETNE)); 2446 2447 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2448 A64CMP, IfTrue, IfFalse, 2449 DAG.getConstant(A64CC::NE, MVT::i32)); 2450} 2451 2452static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { 2453 SDLoc DL(Op); 2454 SDValue LHS = Op.getOperand(0); 2455 SDValue RHS = Op.getOperand(1); 2456 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2457 EVT VT = Op.getValueType(); 2458 bool Invert = false; 2459 SDValue Op0, Op1; 2460 unsigned Opcode; 2461 2462 if (LHS.getValueType().isInteger()) { 2463 2464 // Attempt to use Vector Integer Compare Mask Test instruction. 2465 // TST = icmp ne (and (op0, op1), zero). 2466 if (CC == ISD::SETNE) { 2467 if (((LHS.getOpcode() == ISD::AND) && 2468 ISD::isBuildVectorAllZeros(RHS.getNode())) || 2469 ((RHS.getOpcode() == ISD::AND) && 2470 ISD::isBuildVectorAllZeros(LHS.getNode()))) { 2471 2472 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; 2473 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); 2474 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); 2475 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); 2476 } 2477 } 2478 2479 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). 2480 // Note: Compare against Zero does not support unsigned predicates. 2481 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || 2482 ISD::isBuildVectorAllZeros(LHS.getNode())) && 2483 !isUnsignedIntSetCC(CC)) { 2484 2485 // If LHS is the zero value, swap operands and CondCode. 2486 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2487 CC = getSetCCSwappedOperands(CC); 2488 Op0 = RHS; 2489 } else 2490 Op0 = LHS; 2491 2492 // Ensure valid CondCode for Compare Mask against Zero instruction: 2493 // EQ, GE, GT, LE, LT. 2494 if (ISD::SETNE == CC) { 2495 Invert = true; 2496 CC = ISD::SETEQ; 2497 } 2498 2499 // Using constant type to differentiate integer and FP compares with zero. 2500 Op1 = DAG.getConstant(0, MVT::i32); 2501 Opcode = AArch64ISD::NEON_CMPZ; 2502 2503 } else { 2504 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned). 2505 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT. 2506 bool Swap = false; 2507 switch (CC) { 2508 default: 2509 llvm_unreachable("Illegal integer comparison."); 2510 case ISD::SETEQ: 2511 case ISD::SETGT: 2512 case ISD::SETGE: 2513 case ISD::SETUGT: 2514 case ISD::SETUGE: 2515 break; 2516 case ISD::SETNE: 2517 Invert = true; 2518 CC = ISD::SETEQ; 2519 break; 2520 case ISD::SETULT: 2521 case ISD::SETULE: 2522 case ISD::SETLT: 2523 case ISD::SETLE: 2524 Swap = true; 2525 CC = getSetCCSwappedOperands(CC); 2526 } 2527 2528 if (Swap) 2529 std::swap(LHS, RHS); 2530 2531 Opcode = AArch64ISD::NEON_CMP; 2532 Op0 = LHS; 2533 Op1 = RHS; 2534 } 2535 2536 // Generate Compare Mask instr or Compare Mask against Zero instr. 2537 SDValue NeonCmp = 2538 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2539 2540 if (Invert) 2541 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2542 2543 return NeonCmp; 2544 } 2545 2546 // Now handle Floating Point cases. 2547 // Attempt to use Vector Floating Point Compare Mask against Zero instruction. 
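  // (e.g. FCMGT Vd.4S, Vn.4S, #0.0 for a greater-than comparison against a
  // zero vector.)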
2548 if (ISD::isBuildVectorAllZeros(RHS.getNode()) || 2549 ISD::isBuildVectorAllZeros(LHS.getNode())) { 2550 2551 // If LHS is the zero value, swap operands and CondCode. 2552 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2553 CC = getSetCCSwappedOperands(CC); 2554 Op0 = RHS; 2555 } else 2556 Op0 = LHS; 2557 2558 // Using constant type to differentiate integer and FP compares with zero. 2559 Op1 = DAG.getConstantFP(0, MVT::f32); 2560 Opcode = AArch64ISD::NEON_CMPZ; 2561 } else { 2562 // Attempt to use Vector Floating Point Compare Mask instruction. 2563 Op0 = LHS; 2564 Op1 = RHS; 2565 Opcode = AArch64ISD::NEON_CMP; 2566 } 2567 2568 SDValue NeonCmpAlt; 2569 // Some register compares have to be implemented with swapped CC and operands, 2570 // e.g.: OLT implemented as OGT with swapped operands. 2571 bool SwapIfRegArgs = false; 2572 2573 // Ensure valid CondCode for FP Compare Mask against Zero instruction: 2574 // EQ, GE, GT, LE, LT. 2575 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT. 2576 switch (CC) { 2577 default: 2578 llvm_unreachable("Illegal FP comparison"); 2579 case ISD::SETUNE: 2580 case ISD::SETNE: 2581 Invert = true; // Fallthrough 2582 case ISD::SETOEQ: 2583 case ISD::SETEQ: 2584 CC = ISD::SETEQ; 2585 break; 2586 case ISD::SETOLT: 2587 case ISD::SETLT: 2588 CC = ISD::SETLT; 2589 SwapIfRegArgs = true; 2590 break; 2591 case ISD::SETOGT: 2592 case ISD::SETGT: 2593 CC = ISD::SETGT; 2594 break; 2595 case ISD::SETOLE: 2596 case ISD::SETLE: 2597 CC = ISD::SETLE; 2598 SwapIfRegArgs = true; 2599 break; 2600 case ISD::SETOGE: 2601 case ISD::SETGE: 2602 CC = ISD::SETGE; 2603 break; 2604 case ISD::SETUGE: 2605 Invert = true; 2606 CC = ISD::SETLT; 2607 SwapIfRegArgs = true; 2608 break; 2609 case ISD::SETULE: 2610 Invert = true; 2611 CC = ISD::SETGT; 2612 break; 2613 case ISD::SETUGT: 2614 Invert = true; 2615 CC = ISD::SETLE; 2616 SwapIfRegArgs = true; 2617 break; 2618 case ISD::SETULT: 2619 Invert = true; 2620 CC = ISD::SETGE; 2621 break; 2622 case ISD::SETUEQ: 2623 Invert = true; // Fallthrough 2624 case ISD::SETONE: 2625 // Expand this to (OGT |OLT). 2626 NeonCmpAlt = 2627 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT)); 2628 CC = ISD::SETLT; 2629 SwapIfRegArgs = true; 2630 break; 2631 case ISD::SETUO: 2632 Invert = true; // Fallthrough 2633 case ISD::SETO: 2634 // Expand this to (OGE | OLT). 
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  }

  if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
    CC = getSetCCSwappedOperands(CC);
    std::swap(Op0, Op1);
  }

  // Generate FP Compare Mask instr or FP Compare Mask against Zero instr.
  SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

  if (NeonCmpAlt.getNode())
    NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);

  if (Invert)
    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  return NeonCmp;
}

// (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();

  if (VT.isVector())
    return LowerVectorSETCC(Op, DAG);

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
    // for the rest of the function (some i32 or i64 values).
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, use it.
    if (RHS.getNode() == 0) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       A64cc);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                                     CmpOp, DAG.getConstant(1, VT),
                                     DAG.getConstant(0, VT), A64cc);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
                               DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}

SDValue
AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
  // rather than just 8.
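  // For reference, the AAPCS64 va_list being copied is effectively (see
  // LowerVASTART below for the field offsets):
  //     struct va_list {
  //       void *__stack;   // offset 0
  //       void *__gr_top;  // offset 8
  //       void *__vr_top;  // offset 16
  //       int __gr_offs;   // offset 24
  //       int __vr_offs;   // offset 28
  //     };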
2724 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), 2725 Op.getOperand(1), Op.getOperand(2), 2726 DAG.getConstant(32, MVT::i32), 8, false, false, 2727 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); 2728} 2729 2730SDValue 2731AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2732 // The layout of the va_list struct is specified in the AArch64 Procedure Call 2733 // Standard, section B.3. 2734 MachineFunction &MF = DAG.getMachineFunction(); 2735 AArch64MachineFunctionInfo *FuncInfo 2736 = MF.getInfo<AArch64MachineFunctionInfo>(); 2737 SDLoc DL(Op); 2738 2739 SDValue Chain = Op.getOperand(0); 2740 SDValue VAList = Op.getOperand(1); 2741 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2742 SmallVector<SDValue, 4> MemOps; 2743 2744 // void *__stack at offset 0 2745 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(), 2746 getPointerTy()); 2747 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, 2748 MachinePointerInfo(SV), false, false, 0)); 2749 2750 // void *__gr_top at offset 8 2751 int GPRSize = FuncInfo->getVariadicGPRSize(); 2752 if (GPRSize > 0) { 2753 SDValue GRTop, GRTopAddr; 2754 2755 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2756 DAG.getConstant(8, getPointerTy())); 2757 2758 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy()); 2759 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop, 2760 DAG.getConstant(GPRSize, getPointerTy())); 2761 2762 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, 2763 MachinePointerInfo(SV, 8), 2764 false, false, 0)); 2765 } 2766 2767 // void *__vr_top at offset 16 2768 int FPRSize = FuncInfo->getVariadicFPRSize(); 2769 if (FPRSize > 0) { 2770 SDValue VRTop, VRTopAddr; 2771 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2772 DAG.getConstant(16, getPointerTy())); 2773 2774 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy()); 2775 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop, 2776 DAG.getConstant(FPRSize, getPointerTy())); 2777 2778 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, 2779 MachinePointerInfo(SV, 16), 2780 false, false, 0)); 2781 } 2782 2783 // int __gr_offs at offset 24 2784 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2785 DAG.getConstant(24, getPointerTy())); 2786 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32), 2787 GROffsAddr, MachinePointerInfo(SV, 24), 2788 false, false, 0)); 2789 2790 // int __vr_offs at offset 28 2791 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2792 DAG.getConstant(28, getPointerTy())); 2793 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32), 2794 VROffsAddr, MachinePointerInfo(SV, 28), 2795 false, false, 0)); 2796 2797 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], 2798 MemOps.size()); 2799} 2800 2801SDValue 2802AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2803 switch (Op.getOpcode()) { 2804 default: llvm_unreachable("Don't know how to custom lower this!"); 2805 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128); 2806 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128); 2807 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128); 2808 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128); 2809 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true); 2810 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false); 2811 case 
ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true); 2812 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false); 2813 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 2814 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 2815 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 2816 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 2817 2818 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2819 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 2820 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 2821 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG); 2822 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2823 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2824 case ISD::SELECT: return LowerSELECT(Op, DAG); 2825 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2826 case ISD::SETCC: return LowerSETCC(Op, DAG); 2827 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 2828 case ISD::VASTART: return LowerVASTART(Op, DAG); 2829 case ISD::BUILD_VECTOR: 2830 return LowerBUILD_VECTOR(Op, DAG, getSubtarget()); 2831 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2832 } 2833 2834 return SDValue(); 2835} 2836 2837/// Check if the specified splat value corresponds to a valid vector constant 2838/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If 2839/// so, return the encoded 8-bit immediate and the OpCmode instruction fields 2840/// values. 2841static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 2842 unsigned SplatBitSize, SelectionDAG &DAG, 2843 bool is128Bits, NeonModImmType type, EVT &VT, 2844 unsigned &Imm, unsigned &OpCmode) { 2845 switch (SplatBitSize) { 2846 default: 2847 llvm_unreachable("unexpected size for isNeonModifiedImm"); 2848 case 8: { 2849 if (type != Neon_Mov_Imm) 2850 return false; 2851 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 2852 // Neon movi per byte: Op=0, Cmode=1110. 2853 OpCmode = 0xe; 2854 Imm = SplatBits; 2855 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 2856 break; 2857 } 2858 case 16: { 2859 // Neon move inst per halfword 2860 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 2861 if ((SplatBits & ~0xff) == 0) { 2862 // Value = 0x00nn is 0x00nn LSL 0 2863 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000 2864 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001 2865 // Op=x, Cmode=100y 2866 Imm = SplatBits; 2867 OpCmode = 0x8; 2868 break; 2869 } 2870 if ((SplatBits & ~0xff00) == 0) { 2871 // Value = 0xnn00 is 0x00nn LSL 8 2872 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010 2873 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011 2874 // Op=x, Cmode=101x 2875 Imm = SplatBits >> 8; 2876 OpCmode = 0xa; 2877 break; 2878 } 2879 // can't handle any other 2880 return false; 2881 } 2882 2883 case 32: { 2884 // First the LSL variants (MSL is unusable by some interested instructions). 2885 2886 // Neon move instr per word, shift zeros 2887 VT = is128Bits ? 
MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn is 0x000000nn LSL 0
      // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
      // bic:  Op=1, Cmode= 0001; orr:  Op=0, Cmode= 0001
      // Op=x, Cmode=000x
      Imm = SplatBits;
      OpCmode = 0;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00 is 0x000000nn LSL 8
      // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
      // bic:  Op=1, Cmode= 0011; orr:  Op=0, Cmode= 0011
      // Op=x, Cmode=001x
      Imm = SplatBits >> 8;
      OpCmode = 0x2;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000 is 0x000000nn LSL 16
      // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
      // bic:  Op=1, Cmode= 0101; orr:  Op=0, Cmode= 0101
      // Op=x, Cmode=010x
      Imm = SplatBits >> 16;
      OpCmode = 0x4;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000 is 0x000000nn LSL 24
      // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
      // bic:  Op=1, Cmode= 0111; orr:  Op=0, Cmode= 0111
      // Op=x, Cmode=011x
      Imm = SplatBits >> 24;
      OpCmode = 0x6;
      break;
    }

    // Now the MSL immediates.

    // Neon move instr per word, shift ones
    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff is 0x000000nn MSL 8
      // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
      // Op=x, Cmode=1100
      Imm = SplatBits >> 8;
      OpCmode = 0xc;
      break;
    }
    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff is 0x000000nn MSL 16
      // movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
      // Op=x, Cmode=1101
      Imm = SplatBits >> 16;
      OpCmode = 0xd;
      break;
    }
    // can't handle any other
    return false;
  }

  case 64: {
    if (type != Neon_Mov_Imm)
      return false;
    // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
    // movi Op=1, Cmode=1110.
    OpCmode = 0x1e;
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return false;
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }
    SplatBits = Val;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }
  }

  return true;
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We're looking for an AND of an SRL, which together form a UBFX.
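  // For example, (and (srl x, 3), 0x1f) extracts bits [7:3] of x: LSB is 3,
  // Width is 5, and we build UBFX(x, 3, 7) below, since the node takes the
  // LSB and the MSB (LSB + Width - 1) as operands.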
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  uint64_t TruncMask = N->getConstantOperandVal(1);
  if (!isMask_64(TruncMask))
    return SDValue();

  uint64_t Width = CountPopulation_64(TruncMask);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SRL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();
  uint64_t LSB = Shift->getConstantOperandVal(1);

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();

  return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
                     DAG.getConstant(LSB, MVT::i64),
                     DAG.getConstant(LSB + Width - 1, MVT::i64));
}

/// For a true bitfield insert, the bits getting into that contiguous mask
/// should come from the low part of an existing value: they must be formed
/// from a compatible SHL operation (unless they're already low). This
/// function checks that condition and returns the least-significant bit
/// that's intended. If the operation is not a field preparation, -1 is
/// returned.
static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
                            SDValue &MaskedVal, uint64_t Mask) {
  if (!isShiftedMask_64(Mask))
    return -1;

  // Now we need to alter MaskedVal so that it is an appropriate input for a
  // BFI instruction. BFI will do a left-shift by LSB before applying the mask
  // we've spotted, so in general we should pre-emptively "undo" that by
  // making sure the incoming bits have had a right-shift applied to them.
  //
  // This right shift, however, will combine with existing left/right shifts.
  // In the simplest case of a completely straight bitfield operation, it will
  // be expected to completely cancel out with an existing SHL. More
  // complicated cases (e.g. bitfield to bitfield copy) may still need a real
  // shift before the BFI.

  uint64_t LSB = countTrailingZeros(Mask);
  int64_t ShiftRightRequired = LSB;
  if (MaskedVal.getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
    ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
    MaskedVal = MaskedVal.getOperand(0);
  } else if (MaskedVal.getOpcode() == ISD::SRL &&
             isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
    ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
    MaskedVal = MaskedVal.getOperand(0);
  }

  if (ShiftRightRequired > 0)
    MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
                            DAG.getConstant(ShiftRightRequired, MVT::i64));
  else if (ShiftRightRequired < 0) {
    // We could actually end up with a residual left shift, for example with
    // "struc.bitfield = val << 1".
    MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
                            DAG.getConstant(-ShiftRightRequired, MVT::i64));
  }

  return LSB;
}

/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded
/// by a mask and an extension. Returns true if a BFI was found and provides
/// information on its surroundings.
3065static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask, 3066 bool &Extended) { 3067 Extended = false; 3068 if (N.getOpcode() == ISD::ZERO_EXTEND) { 3069 Extended = true; 3070 N = N.getOperand(0); 3071 } 3072 3073 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) { 3074 Mask = N->getConstantOperandVal(1); 3075 N = N.getOperand(0); 3076 } else { 3077 // Mask is the whole width. 3078 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits()); 3079 } 3080 3081 if (N.getOpcode() == AArch64ISD::BFI) { 3082 BFI = N; 3083 return true; 3084 } 3085 3086 return false; 3087} 3088 3089/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which 3090/// is roughly equivalent to (and (BFI ...), mask). This form is used because it 3091/// can often be further combined with a larger mask. Ultimately, we want mask 3092/// to be 2^32-1 or 2^64-1 so the AND can be skipped. 3093static SDValue tryCombineToBFI(SDNode *N, 3094 TargetLowering::DAGCombinerInfo &DCI, 3095 const AArch64Subtarget *Subtarget) { 3096 SelectionDAG &DAG = DCI.DAG; 3097 SDLoc DL(N); 3098 EVT VT = N->getValueType(0); 3099 3100 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 3101 3102 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or 3103 // abandon the effort. 3104 SDValue LHS = N->getOperand(0); 3105 if (LHS.getOpcode() != ISD::AND) 3106 return SDValue(); 3107 3108 uint64_t LHSMask; 3109 if (isa<ConstantSDNode>(LHS.getOperand(1))) 3110 LHSMask = LHS->getConstantOperandVal(1); 3111 else 3112 return SDValue(); 3113 3114 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask 3115 // is or abandon the effort. 3116 SDValue RHS = N->getOperand(1); 3117 if (RHS.getOpcode() != ISD::AND) 3118 return SDValue(); 3119 3120 uint64_t RHSMask; 3121 if (isa<ConstantSDNode>(RHS.getOperand(1))) 3122 RHSMask = RHS->getConstantOperandVal(1); 3123 else 3124 return SDValue(); 3125 3126 // Can't do anything if the masks are incompatible. 3127 if (LHSMask & RHSMask) 3128 return SDValue(); 3129 3130 // Now we need one of the masks to be a contiguous field. Without loss of 3131 // generality that should be the RHS one. 3132 SDValue Bitfield = LHS.getOperand(0); 3133 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) { 3134 // We know that LHS is a candidate new value, and RHS isn't already a better 3135 // one. 3136 std::swap(LHS, RHS); 3137 std::swap(LHSMask, RHSMask); 3138 } 3139 3140 // We've done our best to put the right operands in the right places, all we 3141 // can do now is check whether a BFI exists. 3142 Bitfield = RHS.getOperand(0); 3143 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask); 3144 if (LSB == -1) 3145 return SDValue(); 3146 3147 uint32_t Width = CountPopulation_64(RHSMask); 3148 assert(Width && "Expected non-zero bitfield width"); 3149 3150 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, 3151 LHS.getOperand(0), Bitfield, 3152 DAG.getConstant(LSB, MVT::i64), 3153 DAG.getConstant(Width, MVT::i64)); 3154 3155 // Mask is trivial 3156 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits()))) 3157 return BFI; 3158 3159 return DAG.getNode(ISD::AND, DL, VT, BFI, 3160 DAG.getConstant(LHSMask | RHSMask, VT)); 3161} 3162 3163/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its 3164/// original input. This is surprisingly common because SROA splits things up 3165/// into i8 chunks, so the originally detected MaskedBFI may actually only act 3166/// on the low (say) byte of a word. 
This is then ORed into the rest of the
3167/// word afterwards.
3168///
3169/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3170///
3171/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
3172/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
3173/// involved.
3174static SDValue tryCombineToLargerBFI(SDNode *N,
3175 TargetLowering::DAGCombinerInfo &DCI,
3176 const AArch64Subtarget *Subtarget) {
3177 SelectionDAG &DAG = DCI.DAG;
3178 SDLoc DL(N);
3179 EVT VT = N->getValueType(0);
3180
3181 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3182 // operands if it's actually on the right.
3183 SDValue BFI;
3184 SDValue PossExtraMask;
3185 uint64_t ExistingMask = 0;
3186 bool Extended = false;
3187 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3188 PossExtraMask = N->getOperand(1);
3189 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
3190 PossExtraMask = N->getOperand(0);
3191 else
3192 return SDValue();
3193
3194 // We can only combine a BFI with another compatible mask.
3195 if (PossExtraMask.getOpcode() != ISD::AND ||
3196 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
3197 return SDValue();
3198
3199 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3200
3201 // Masks must be compatible.
3202 if (ExtraMask & ExistingMask)
3203 return SDValue();
3204
3205 SDValue OldBFIVal = BFI.getOperand(0);
3206 SDValue NewBFIVal = BFI.getOperand(1);
3207 if (Extended) {
3208 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3209 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3210 // need to be made compatible.
3211 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3212 && "Invalid types for BFI");
3213 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3214 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3215 }
3216
3217 // We need the MaskedBFI to be combined with a mask of the *same* value.
3218 if (PossExtraMask.getOperand(0) != OldBFIVal)
3219 return SDValue();
3220
3221 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3222 OldBFIVal, NewBFIVal,
3223 BFI.getOperand(2), BFI.getOperand(3));
3224
3225 // If the masking is trivial, we don't need to create it.
3226 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3227 return BFI;
3228
3229 return DAG.getNode(ISD::AND, DL, VT, BFI,
3230 DAG.getConstant(ExtraMask | ExistingMask, VT));
3231}
3232
3233/// An EXTR instruction is made up of two shifts, ORed together. This helper
3234/// searches for and classifies those shifts.
3235static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
3236 bool &FromHi) {
3237 if (N.getOpcode() == ISD::SHL)
3238 FromHi = false;
3239 else if (N.getOpcode() == ISD::SRL)
3240 FromHi = true;
3241 else
3242 return false;
3243
3244 if (!isa<ConstantSDNode>(N.getOperand(1)))
3245 return false;
3246
3247 ShiftAmount = N->getConstantOperandVal(1);
3248 Src = N->getOperand(0);
3249 return true;
3250}
3251
3252/// An EXTR instruction extracts a contiguous chunk of bits from two existing
3253/// registers viewed as a high/low pair. This function looks for the pattern:
3254/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
3255/// EXTR. Can't quite be done in TableGen because the two immediates aren't
3256/// independent.
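/// A sketch of the transform (our example): for i32,
///   (or (shl x, #24), (srl y, #8)) --> EXTR x, y, #8
/// since 24 + 8 == 32; the SRL half supplies the low 24 bits of the result
/// and the SHL half the high 8 bits.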
3257static SDValue tryCombineToEXTR(SDNode *N,
3258 TargetLowering::DAGCombinerInfo &DCI) {
3259 SelectionDAG &DAG = DCI.DAG;
3260 SDLoc DL(N);
3261 EVT VT = N->getValueType(0);
3262
3263 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3264
3265 if (VT != MVT::i32 && VT != MVT::i64)
3266 return SDValue();
3267
3268 SDValue LHS;
3269 uint32_t ShiftLHS = 0;
3270 bool LHSFromHi = false;
3271 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
3272 return SDValue();
3273
3274 SDValue RHS;
3275 uint32_t ShiftRHS = 0;
3276 bool RHSFromHi = false;
3277 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
3278 return SDValue();
3279
3280 // If both halves come from the same side of the register (both SHLs or both
3281 // SRLs), they don't really form an EXTR.
3282 if (LHSFromHi == RHSFromHi)
3283 return SDValue();
3284
3285 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3286 return SDValue();
3287
3288 if (LHSFromHi) {
3289 std::swap(LHS, RHS);
3290 std::swap(ShiftLHS, ShiftRHS);
3291 }
3292
3293 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3294 LHS, RHS,
3295 DAG.getConstant(ShiftRHS, MVT::i64));
3296}
3297
3298/// Target-specific dag combine xforms for ISD::OR
3299static SDValue PerformORCombine(SDNode *N,
3300 TargetLowering::DAGCombinerInfo &DCI,
3301 const AArch64Subtarget *Subtarget) {
3302
3303 SelectionDAG &DAG = DCI.DAG;
3304 SDLoc DL(N);
3305 EVT VT = N->getValueType(0);
3306
3307 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3308 return SDValue();
3309
3310 // Attempt to recognise bitfield-insert operations.
3311 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
3312 if (Res.getNode())
3313 return Res;
3314
3315 // Attempt to combine an existing MaskedBFI operation into one with a larger
3316 // mask.
3317 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3318 if (Res.getNode())
3319 return Res;
3320
3321 Res = tryCombineToEXTR(N, DCI);
3322 if (Res.getNode())
3323 return Res;
3324
3325 if (!Subtarget->hasNEON())
3326 return SDValue();
3327
3328 // Attempt to use vector immediate-form BSL
3329 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
3330
3331 SDValue N0 = N->getOperand(0);
3332 if (N0.getOpcode() != ISD::AND)
3333 return SDValue();
3334
3335 SDValue N1 = N->getOperand(1);
3336 if (N1.getOpcode() != ISD::AND)
3337 return SDValue();
3338
3339 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3340 APInt SplatUndef;
3341 unsigned SplatBitSize;
3342 bool HasAnyUndefs;
3343 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3344 APInt SplatBits0;
3345 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3346 HasAnyUndefs) &&
3347 !HasAnyUndefs) {
3348 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3349 APInt SplatBits1;
3350 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3351 HasAnyUndefs) &&
3352 !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
3353 // Canonicalize the vector type to make instruction selection simpler.
3354 EVT CanonicalVT = VT.is128BitVector() ?
MVT::v16i8 : MVT::v8i8; 3355 SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT, 3356 N0->getOperand(1), N0->getOperand(0), 3357 N1->getOperand(0)); 3358 return DAG.getNode(ISD::BITCAST, DL, VT, Result); 3359 } 3360 } 3361 } 3362 3363 return SDValue(); 3364} 3365 3366/// Target-specific dag combine xforms for ISD::SRA 3367static SDValue PerformSRACombine(SDNode *N, 3368 TargetLowering::DAGCombinerInfo &DCI) { 3369 3370 SelectionDAG &DAG = DCI.DAG; 3371 SDLoc DL(N); 3372 EVT VT = N->getValueType(0); 3373 3374 // We're looking for an SRA/SHL pair which form an SBFX. 3375 3376 if (VT != MVT::i32 && VT != MVT::i64) 3377 return SDValue(); 3378 3379 if (!isa<ConstantSDNode>(N->getOperand(1))) 3380 return SDValue(); 3381 3382 uint64_t ExtraSignBits = N->getConstantOperandVal(1); 3383 SDValue Shift = N->getOperand(0); 3384 3385 if (Shift.getOpcode() != ISD::SHL) 3386 return SDValue(); 3387 3388 if (!isa<ConstantSDNode>(Shift->getOperand(1))) 3389 return SDValue(); 3390 3391 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1); 3392 uint64_t Width = VT.getSizeInBits() - ExtraSignBits; 3393 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft; 3394 3395 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) 3396 return SDValue(); 3397 3398 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0), 3399 DAG.getConstant(LSB, MVT::i64), 3400 DAG.getConstant(LSB + Width - 1, MVT::i64)); 3401} 3402 3403/// Check if this is a valid build_vector for the immediate operand of 3404/// a vector shift operation, where all the elements of the build_vector 3405/// must have the same constant integer value. 3406static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 3407 // Ignore bit_converts. 3408 while (Op.getOpcode() == ISD::BITCAST) 3409 Op = Op.getOperand(0); 3410 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3411 APInt SplatBits, SplatUndef; 3412 unsigned SplatBitSize; 3413 bool HasAnyUndefs; 3414 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 3415 HasAnyUndefs, ElementBits) || 3416 SplatBitSize > ElementBits) 3417 return false; 3418 Cnt = SplatBits.getSExtValue(); 3419 return true; 3420} 3421 3422/// Check if this is a valid build_vector for the immediate operand of 3423/// a vector shift left operation. That value must be in the range: 3424/// 0 <= Value < ElementBits 3425static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) { 3426 assert(VT.isVector() && "vector shift count is not a vector type"); 3427 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3428 if (!getVShiftImm(Op, ElementBits, Cnt)) 3429 return false; 3430 return (Cnt >= 0 && Cnt < ElementBits); 3431} 3432 3433/// Check if this is a valid build_vector for the immediate operand of a 3434/// vector shift right operation. The value must be in the range: 3435/// 1 <= Value <= ElementBits 3436static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) { 3437 assert(VT.isVector() && "vector shift count is not a vector type"); 3438 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3439 if (!getVShiftImm(Op, ElementBits, Cnt)) 3440 return false; 3441 return (Cnt >= 1 && Cnt <= ElementBits); 3442} 3443 3444/// Checks for immediate versions of vector shifts and lowers them. 
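/// As a sketch (our example): a v4i32 node
///   (shl %v, (build_vector 3, 3, 3, 3))
/// is rewritten below as (shl %v, (NEON_VDUP 3)), which selection can then
/// match against the immediate-shift instruction forms.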
3445static SDValue PerformShiftCombine(SDNode *N,
3446 TargetLowering::DAGCombinerInfo &DCI,
3447 const AArch64Subtarget *ST) {
3448 SelectionDAG &DAG = DCI.DAG;
3449 EVT VT = N->getValueType(0);
3450 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
3451 return PerformSRACombine(N, DCI);
3452
3453 // Nothing to be done for scalar shifts.
3454 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3455 if (!VT.isVector() || !TLI.isTypeLegal(VT))
3456 return SDValue();
3457
3458 assert(ST->hasNEON() && "unexpected vector shift");
3459 int64_t Cnt;
3460
3461 switch (N->getOpcode()) {
3462 default:
3463 llvm_unreachable("unexpected shift opcode");
3464
3465 case ISD::SHL:
3466 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
3467 SDValue RHS =
3468 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3469 DAG.getConstant(Cnt, MVT::i32));
3470 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
3471 }
3472 break;
3473
3474 case ISD::SRA:
3475 case ISD::SRL:
3476 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
3477 SDValue RHS =
3478 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3479 DAG.getConstant(Cnt, MVT::i32));
3480 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
3481 }
3482 break;
3483 }
3484
3485 return SDValue();
3486}
3487
3488/// AArch64-specific DAG combining for intrinsics.
3489static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
3490 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3491
3492 switch (IntNo) {
3493 default:
3494 // Don't do anything for most intrinsics.
3495 break;
3496
3497 case Intrinsic::arm_neon_vqshifts:
3498 case Intrinsic::arm_neon_vqshiftu:
3499 EVT VT = N->getOperand(1).getValueType();
3500 int64_t Cnt;
3501 if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
3502 break;
3503 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
3504 ? AArch64ISD::NEON_QSHLs
3505 : AArch64ISD::NEON_QSHLu;
3506 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
3507 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
3508 }
3509
3510 return SDValue();
3511}
3512
3513/// Target-specific DAG combine function for NEON load/store intrinsics
3514/// to merge base address updates.
3515static SDValue CombineBaseUpdate(SDNode *N,
3516 TargetLowering::DAGCombinerInfo &DCI) {
3517 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
3518 return SDValue();
3519
3520 SelectionDAG &DAG = DCI.DAG;
3521 unsigned AddrOpIdx = 2;
3522 SDValue Addr = N->getOperand(AddrOpIdx);
3523
3524 // Search for a use of the address operand that is an increment.
3525 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
3526 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
3527 SDNode *User = *UI;
3528 if (User->getOpcode() != ISD::ADD ||
3529 UI.getUse().getResNo() != Addr.getResNo())
3530 continue;
3531
3532 // Check that the add is independent of the load/store. Otherwise, folding
3533 // it would create a cycle.
3534 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
3535 continue;
3536
3537 // Find the new opcode for the updating load/store.
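 // (Stated loosely, the goal is to fold "ld1 {v0.4s}, [x0]" followed by
 // "add x0, x0, #16" into the post-indexed "ld1 {v0.4s}, [x0], #16"; the
 // *_UPD node carries the updated address as an extra result.)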
3538 bool isLoad = true; 3539 unsigned NewOpc = 0; 3540 unsigned NumVecs = 0; 3541 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 3542 switch (IntNo) { 3543 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 3544 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD; 3545 NumVecs = 1; break; 3546 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD; 3547 NumVecs = 2; break; 3548 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD; 3549 NumVecs = 3; break; 3550 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD; 3551 NumVecs = 4; break; 3552 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD; 3553 NumVecs = 1; isLoad = false; break; 3554 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD; 3555 NumVecs = 2; isLoad = false; break; 3556 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD; 3557 NumVecs = 3; isLoad = false; break; 3558 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD; 3559 NumVecs = 4; isLoad = false; break; 3560 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD; 3561 NumVecs = 2; break; 3562 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD; 3563 NumVecs = 3; break; 3564 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD; 3565 NumVecs = 4; break; 3566 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD; 3567 NumVecs = 2; isLoad = false; break; 3568 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD; 3569 NumVecs = 3; isLoad = false; break; 3570 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD; 3571 NumVecs = 4; isLoad = false; break; 3572 } 3573 3574 // Find the size of memory referenced by the load/store. 3575 EVT VecTy; 3576 if (isLoad) 3577 VecTy = N->getValueType(0); 3578 else 3579 VecTy = N->getOperand(AddrOpIdx + 1).getValueType(); 3580 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 3581 3582 // If the increment is a constant, it must match the memory ref size. 3583 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 3584 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 3585 uint32_t IncVal = CInc->getZExtValue(); 3586 if (IncVal != NumBytes) 3587 continue; 3588 Inc = DAG.getTargetConstant(IncVal, MVT::i32); 3589 } 3590 3591 // Create the new updating load/store node. 3592 EVT Tys[6]; 3593 unsigned NumResultVecs = (isLoad ? NumVecs : 0); 3594 unsigned n; 3595 for (n = 0; n < NumResultVecs; ++n) 3596 Tys[n] = VecTy; 3597 Tys[n++] = MVT::i64; 3598 Tys[n] = MVT::Other; 3599 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2); 3600 SmallVector<SDValue, 8> Ops; 3601 Ops.push_back(N->getOperand(0)); // incoming chain 3602 Ops.push_back(N->getOperand(AddrOpIdx)); 3603 Ops.push_back(Inc); 3604 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 3605 Ops.push_back(N->getOperand(i)); 3606 } 3607 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 3608 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, 3609 Ops.data(), Ops.size(), 3610 MemInt->getMemoryVT(), 3611 MemInt->getMemOperand()); 3612 3613 // Update the uses. 
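 // (The old node's vector results and chain are rerouted to the matching
 // results of UpdN, and the ADD is replaced by UpdN's write-back result at
 // index NumResultVecs.)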
3614 std::vector<SDValue> NewResults;
3615 for (unsigned i = 0; i < NumResultVecs; ++i) {
3616 NewResults.push_back(SDValue(UpdN.getNode(), i));
3617 }
3618 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
3619 DCI.CombineTo(N, NewResults);
3620 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
3621
3622 break;
3623 }
3624 return SDValue();
3625}
3626
3627SDValue
3628AArch64TargetLowering::PerformDAGCombine(SDNode *N,
3629 DAGCombinerInfo &DCI) const {
3630 switch (N->getOpcode()) {
3631 default: break;
3632 case ISD::AND: return PerformANDCombine(N, DCI);
3633 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
3634 case ISD::SHL:
3635 case ISD::SRA:
3636 case ISD::SRL:
3637 return PerformShiftCombine(N, DCI, getSubtarget());
3638 case ISD::INTRINSIC_WO_CHAIN:
3639 return PerformIntrinsicCombine(N, DCI.DAG);
3640 case ISD::INTRINSIC_VOID:
3641 case ISD::INTRINSIC_W_CHAIN:
3642 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
3643 case Intrinsic::arm_neon_vld1:
3644 case Intrinsic::arm_neon_vld2:
3645 case Intrinsic::arm_neon_vld3:
3646 case Intrinsic::arm_neon_vld4:
3647 case Intrinsic::arm_neon_vst1:
3648 case Intrinsic::arm_neon_vst2:
3649 case Intrinsic::arm_neon_vst3:
3650 case Intrinsic::arm_neon_vst4:
3651 case Intrinsic::aarch64_neon_vld1x2:
3652 case Intrinsic::aarch64_neon_vld1x3:
3653 case Intrinsic::aarch64_neon_vld1x4:
3654 case Intrinsic::aarch64_neon_vst1x2:
3655 case Intrinsic::aarch64_neon_vst1x3:
3656 case Intrinsic::aarch64_neon_vst1x4:
3657 return CombineBaseUpdate(N, DCI);
3658 default:
3659 break;
3660 }
3661 }
3662 return SDValue();
3663}
3664
3665bool
3666AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3667 VT = VT.getScalarType();
3668
3669 if (!VT.isSimple())
3670 return false;
3671
3672 switch (VT.getSimpleVT().SimpleTy) {
3673 case MVT::f16:
3674 case MVT::f32:
3675 case MVT::f64:
3676 return true;
3677 case MVT::f128:
3678 return false;
3679 default:
3680 break;
3681 }
3682
3683 return false;
3684}
3685
3686// If this is a case we can't handle, return null and let the default
3687// expansion code take care of it.
3688SDValue
3689AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3690 const AArch64Subtarget *ST) const {
3691
3692 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
3693 SDLoc DL(Op);
3694 EVT VT = Op.getValueType();
3695
3696 APInt SplatBits, SplatUndef;
3697 unsigned SplatBitSize;
3698 bool HasAnyUndefs;
3699
3700 bool UseNeonMov = VT.getSizeInBits() >= 64;
3701
3702 // Note we favor lowering MOVI over MVNI.
3703 // This has implications on the definition of patterns in TableGen to select
3704 // BIC immediate instructions but not ORR immediate instructions.
3705 // If this lowering order is changed, TableGen patterns for BIC immediate and
3706 // ORR immediate instructions have to be updated.
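 // (Example of the preference, assuming the usual AdvSIMD modified-immediate
 // rules: a v4i32 splat of 0x000000ff is emitted as a MOVI, while a splat of
 // 0xffffff00 only fits via its complement 0x000000ff and becomes an MVNI.)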
3707 if (UseNeonMov && 3708 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3709 if (SplatBitSize <= 64) { 3710 // First attempt to use vector immediate-form MOVI 3711 EVT NeonMovVT; 3712 unsigned Imm = 0; 3713 unsigned OpCmode = 0; 3714 3715 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), 3716 SplatBitSize, DAG, VT.is128BitVector(), 3717 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) { 3718 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3719 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3720 3721 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3722 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT, 3723 ImmVal, OpCmodeVal); 3724 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3725 } 3726 } 3727 3728 // Then attempt to use vector immediate-form MVNI 3729 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3730 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, 3731 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT, 3732 Imm, OpCmode)) { 3733 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3734 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3735 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3736 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT, 3737 ImmVal, OpCmodeVal); 3738 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3739 } 3740 } 3741 3742 // Attempt to use vector immediate-form FMOV 3743 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) || 3744 (VT == MVT::v2f64 && SplatBitSize == 64)) { 3745 APFloat RealVal( 3746 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble, 3747 SplatBits); 3748 uint32_t ImmVal; 3749 if (A64Imms::isFPImm(RealVal, ImmVal)) { 3750 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 3751 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val); 3752 } 3753 } 3754 } 3755 } 3756 3757 unsigned NumElts = VT.getVectorNumElements(); 3758 bool isOnlyLowElement = true; 3759 bool usesOnlyOneValue = true; 3760 bool hasDominantValue = false; 3761 bool isConstant = true; 3762 3763 // Map of the number of times a particular SDValue appears in the 3764 // element list. 3765 DenseMap<SDValue, unsigned> ValueCounts; 3766 SDValue Value; 3767 for (unsigned i = 0; i < NumElts; ++i) { 3768 SDValue V = Op.getOperand(i); 3769 if (V.getOpcode() == ISD::UNDEF) 3770 continue; 3771 if (i > 0) 3772 isOnlyLowElement = false; 3773 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 3774 isConstant = false; 3775 3776 ValueCounts.insert(std::make_pair(V, 0)); 3777 unsigned &Count = ValueCounts[V]; 3778 3779 // Is this value dominant? (takes up more than half of the lanes) 3780 if (++Count > (NumElts / 2)) { 3781 hasDominantValue = true; 3782 Value = V; 3783 } 3784 } 3785 if (ValueCounts.size() != 1) 3786 usesOnlyOneValue = false; 3787 if (!Value.getNode() && ValueCounts.size() > 0) 3788 Value = ValueCounts.begin()->first; 3789 3790 if (ValueCounts.size() == 0) 3791 return DAG.getUNDEF(VT); 3792 3793 // Loads are better lowered with insert_vector_elt. 3794 // Keep going if we are hitting this case. 3795 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 3796 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 3797 3798 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3799 // Use VDUP for non-constant splats. 
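 // (e.g. a v4i32 of <%a, %a, %a, %b>: %a is dominant, so we emit
 // NEON_VDUP(%a) and then insert %b into lane 3 with INSERT_VECTOR_ELT.)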
3800 if (hasDominantValue && EltSize <= 64) { 3801 if (!isConstant) { 3802 SDValue N; 3803 3804 // If we are DUPing a value that comes directly from a vector, we could 3805 // just use DUPLANE. We can only do this if the lane being extracted 3806 // is at a constant index, as the DUP from lane instructions only have 3807 // constant-index forms. 3808 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 3809 isa<ConstantSDNode>(Value->getOperand(1))) { 3810 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, 3811 Value->getOperand(0), Value->getOperand(1)); 3812 } else 3813 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 3814 3815 if (!usesOnlyOneValue) { 3816 // The dominant value was splatted as 'N', but we now have to insert 3817 // all differing elements. 3818 for (unsigned I = 0; I < NumElts; ++I) { 3819 if (Op.getOperand(I) == Value) 3820 continue; 3821 SmallVector<SDValue, 3> Ops; 3822 Ops.push_back(N); 3823 Ops.push_back(Op.getOperand(I)); 3824 Ops.push_back(DAG.getConstant(I, MVT::i32)); 3825 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3); 3826 } 3827 } 3828 return N; 3829 } 3830 if (usesOnlyOneValue && isConstant) { 3831 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 3832 } 3833 } 3834 // If all elements are constants and the case above didn't get hit, fall back 3835 // to the default expansion, which will generate a load from the constant 3836 // pool. 3837 if (isConstant) 3838 return SDValue(); 3839 3840 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 3841 // know the default expansion would otherwise fall back on something even 3842 // worse. For a vector with one or two non-undef values, that's 3843 // scalar_to_vector for the elements followed by a shuffle (provided the 3844 // shuffle is valid for the target) and materialization element by element 3845 // on the stack followed by a load for everything else. 3846 if (!isConstant && !usesOnlyOneValue) { 3847 SDValue Vec = DAG.getUNDEF(VT); 3848 for (unsigned i = 0 ; i < NumElts; ++i) { 3849 SDValue V = Op.getOperand(i); 3850 if (V.getOpcode() == ISD::UNDEF) 3851 continue; 3852 SDValue LaneIdx = DAG.getConstant(i, MVT::i32); 3853 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx); 3854 } 3855 return Vec; 3856 } 3857 return SDValue(); 3858} 3859 3860/// isREVMask - Check if a vector shuffle corresponds to a REV 3861/// instruction with the specified blocksize. (The order of the elements 3862/// within each block of the vector is reversed.) 3863static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 3864 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && 3865 "Only possible block sizes for REV are: 16, 32, 64"); 3866 3867 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 3868 if (EltSz == 64) 3869 return false; 3870 3871 unsigned NumElts = VT.getVectorNumElements(); 3872 unsigned BlockElts = M[0] + 1; 3873 // If the first shuffle index is UNDEF, be optimistic. 
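 // (For reference, a REV64 shuffle of v4i16 has mask <3,2,1,0>, so
 // BlockElts == 4 whether computed as M[0] + 1 or as BlockSize / EltSz.)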
3874 if (M[0] < 0)
3875 BlockElts = BlockSize / EltSz;
3876
3877 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
3878 return false;
3879
3880 for (unsigned i = 0; i < NumElts; ++i) {
3881 if (M[i] < 0)
3882 continue; // ignore UNDEF indices
3883 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
3884 return false;
3885 }
3886
3887 return true;
3888}
3889
3890SDValue
3891AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
3892 SelectionDAG &DAG) const {
3893 SDValue V1 = Op.getOperand(0);
3894 SDValue V2 = Op.getOperand(1);
3895 SDLoc dl(Op);
3896 EVT VT = Op.getValueType();
3897 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
3898
3899 // Convert shuffles that are directly supported on NEON to target-specific
3900 // DAG nodes, instead of keeping them as shuffles and matching them again
3901 // during code selection. This is more efficient and avoids the possibility
3902 // of inconsistencies between legalization and selection.
3903 ArrayRef<int> ShuffleMask = SVN->getMask();
3904
3905 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
3906 if (EltSize > 64)
3907 return SDValue();
3908
3909 if (isREVMask(ShuffleMask, VT, 64))
3910 return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
3911 if (isREVMask(ShuffleMask, VT, 32))
3912 return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
3913 if (isREVMask(ShuffleMask, VT, 16))
3914 return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
3915
3916 // If the elements of the shuffle mask are all the same constant, we can
3917 // transform it into either NEON_VDUP or NEON_VDUPLANE.
3918 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
3919 int Lane = SVN->getSplatIndex();
3920 // If this is an undef splat, generate it via "just" vdup, if possible.
3921 if (Lane == -1) Lane = 0;
3922
3923 // Test if V1 is a SCALAR_TO_VECTOR.
3924 if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
3925 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
3926 }
3927 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
3928 if (V1.getOpcode() == ISD::BUILD_VECTOR) {
3929 bool IsScalarToVector = true;
3930 for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
3931 if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
3932 i != (unsigned)Lane) {
3933 IsScalarToVector = false;
3934 break;
3935 }
3936 if (IsScalarToVector)
3937 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
3938 V1.getOperand(Lane));
3939 }
3940 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
3941 DAG.getConstant(Lane, MVT::i64));
3942 }
3943
3944 int Length = ShuffleMask.size();
3945 int V1EltNum = V1.getValueType().getVectorNumElements();
3946
3947 // If the number of V1 elements is the same as the number of shuffle mask
3948 // elements and the mask values are sequential, we can transform it into
3949 // NEON_VEXTRACT.
3950 if (V1EltNum == Length) {
3951 // Check if the shuffle mask is sequential.
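 // (e.g. with two v4i16 inputs, the mask <2,3,4,5> is sequential; Index
 // becomes (16 / 8) * 2 == 4 and we emit NEON_VEXTRACT V1, V2, #4.)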
3952 bool IsSequential = true;
3953 int CurMask = ShuffleMask[0];
3954 for (int I = 0; I < Length; ++I) {
3955 if (ShuffleMask[I] != CurMask) {
3956 IsSequential = false;
3957 break;
3958 }
3959 CurMask++;
3960 }
3961 if (IsSequential) {
3962 assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
3963 unsigned VecSize = EltSize * V1EltNum;
3964 unsigned Index = (EltSize/8) * ShuffleMask[0];
3965 if (VecSize == 64 || VecSize == 128)
3966 return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
3967 DAG.getConstant(Index, MVT::i64));
3968 }
3969 }
3970
3971 // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
3972 // insert by element from V2 into V1.
3973 // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be
3974 // a better base to insert into than V1, since fewer inserts are needed, so
3975 // we count the elements to be inserted for both V1 and V2 and select the
3976 // one needing fewer inserts as the insert target.
3977
3978 // Collect the elements that need to be inserted and their indices.
3979 SmallVector<int, 8> NV1Elt;
3980 SmallVector<int, 8> N1Index;
3981 SmallVector<int, 8> NV2Elt;
3982 SmallVector<int, 8> N2Index;
3983 for (int I = 0; I != Length; ++I) {
3984 if (ShuffleMask[I] != I) {
3985 NV1Elt.push_back(ShuffleMask[I]);
3986 N1Index.push_back(I);
3987 }
3988 }
3989 for (int I = 0; I != Length; ++I) {
3990 if (ShuffleMask[I] != (I + V1EltNum)) {
3991 NV2Elt.push_back(ShuffleMask[I]);
3992 N2Index.push_back(I);
3993 }
3994 }
3995
3996 // Decide which vector to insert into. If all lanes mismatch, neither V1 nor
3997 // V2 is usable as a base, so start from UNDEF.
3998 SDValue InsV = V1;
3999 SmallVector<int, 8> InsMasks = NV1Elt;
4000 SmallVector<int, 8> InsIndex = N1Index;
4001 if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
4002 if (NV1Elt.size() > NV2Elt.size()) {
4003 InsV = V2;
4004 InsMasks = NV2Elt;
4005 InsIndex = N2Index;
4006 }
4007 } else {
4008 InsV = DAG.getNode(ISD::UNDEF, dl, VT);
4009 }
4010
4011 for (int I = 0, E = InsMasks.size(); I != E; ++I) {
4012 SDValue ExtV = V1;
4013 int Mask = InsMasks[I];
4014 if (Mask >= V1EltNum) {
4015 ExtV = V2;
4016 Mask -= V1EltNum;
4017 }
4018 // Any value type smaller than i32 is illegal on AArch64, and this lowering
4019 // function is called after the legalize pass, so we need to legalize the
4020 // result here.
4021 EVT EltVT;
4022 if (VT.getVectorElementType().isFloatingPoint())
4023 EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
4024 else
4025 EltVT = (EltSize == 64) ?
MVT::i64 : MVT::i32; 4026 4027 ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV, 4028 DAG.getConstant(Mask, MVT::i64)); 4029 InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV, 4030 DAG.getConstant(InsIndex[I], MVT::i64)); 4031 } 4032 return InsV; 4033} 4034 4035AArch64TargetLowering::ConstraintType 4036AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { 4037 if (Constraint.size() == 1) { 4038 switch (Constraint[0]) { 4039 default: break; 4040 case 'w': // An FP/SIMD vector register 4041 return C_RegisterClass; 4042 case 'I': // Constant that can be used with an ADD instruction 4043 case 'J': // Constant that can be used with a SUB instruction 4044 case 'K': // Constant that can be used with a 32-bit logical instruction 4045 case 'L': // Constant that can be used with a 64-bit logical instruction 4046 case 'M': // Constant that can be used as a 32-bit MOV immediate 4047 case 'N': // Constant that can be used as a 64-bit MOV immediate 4048 case 'Y': // Floating point constant zero 4049 case 'Z': // Integer constant zero 4050 return C_Other; 4051 case 'Q': // A memory reference with base register and no offset 4052 return C_Memory; 4053 case 'S': // A symbolic address 4054 return C_Other; 4055 } 4056 } 4057 4058 // FIXME: Ump, Utf, Usa, Ush 4059 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, 4060 // whatever they may be 4061 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be 4062 // Usa: An absolute symbolic address 4063 // Ush: The high part (bits 32:12) of a pc-relative symbolic address 4064 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa" 4065 && Constraint != "Ush" && "Unimplemented constraints"); 4066 4067 return TargetLowering::getConstraintType(Constraint); 4068} 4069 4070TargetLowering::ConstraintWeight 4071AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info, 4072 const char *Constraint) const { 4073 4074 llvm_unreachable("Constraint weight unimplemented"); 4075} 4076 4077void 4078AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4079 std::string &Constraint, 4080 std::vector<SDValue> &Ops, 4081 SelectionDAG &DAG) const { 4082 SDValue Result(0, 0); 4083 4084 // Only length 1 constraints are C_Other. 4085 if (Constraint.size() != 1) return; 4086 4087 // Only C_Other constraints get lowered like this. That means constants for us 4088 // so return early if there's no hope the constraint can be lowered. 4089 4090 switch(Constraint[0]) { 4091 default: break; 4092 case 'I': case 'J': case 'K': case 'L': 4093 case 'M': case 'N': case 'Z': { 4094 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4095 if (!C) 4096 return; 4097 4098 uint64_t CVal = C->getZExtValue(); 4099 uint32_t Bits; 4100 4101 switch (Constraint[0]) { 4102 default: 4103 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J' 4104 // is a peculiarly useless SUB constraint. 4105 llvm_unreachable("Unimplemented C_Other constraint"); 4106 case 'I': 4107 if (CVal <= 0xfff) 4108 break; 4109 return; 4110 case 'K': 4111 if (A64Imms::isLogicalImm(32, CVal, Bits)) 4112 break; 4113 return; 4114 case 'L': 4115 if (A64Imms::isLogicalImm(64, CVal, Bits)) 4116 break; 4117 return; 4118 case 'Z': 4119 if (CVal == 0) 4120 break; 4121 return; 4122 } 4123 4124 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 4125 break; 4126 } 4127 case 'S': { 4128 // An absolute symbolic address or label reference. 
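 // (Hypothetical use: an inline-asm "S" operand given &some_global arrives
 // here as a GlobalAddressSDNode and is rewritten below to its Target* form
 // so it prints as a bare symbol.)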
4129 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 4130 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 4131 GA->getValueType(0)); 4132 } else if (const BlockAddressSDNode *BA 4133 = dyn_cast<BlockAddressSDNode>(Op)) { 4134 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(), 4135 BA->getValueType(0)); 4136 } else if (const ExternalSymbolSDNode *ES 4137 = dyn_cast<ExternalSymbolSDNode>(Op)) { 4138 Result = DAG.getTargetExternalSymbol(ES->getSymbol(), 4139 ES->getValueType(0)); 4140 } else 4141 return; 4142 break; 4143 } 4144 case 'Y': 4145 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 4146 if (CFP->isExactlyValue(0.0)) { 4147 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0)); 4148 break; 4149 } 4150 } 4151 return; 4152 } 4153 4154 if (Result.getNode()) { 4155 Ops.push_back(Result); 4156 return; 4157 } 4158 4159 // It's an unknown constraint for us. Let generic code have a go. 4160 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 4161} 4162 4163std::pair<unsigned, const TargetRegisterClass*> 4164AArch64TargetLowering::getRegForInlineAsmConstraint( 4165 const std::string &Constraint, 4166 MVT VT) const { 4167 if (Constraint.size() == 1) { 4168 switch (Constraint[0]) { 4169 case 'r': 4170 if (VT.getSizeInBits() <= 32) 4171 return std::make_pair(0U, &AArch64::GPR32RegClass); 4172 else if (VT == MVT::i64) 4173 return std::make_pair(0U, &AArch64::GPR64RegClass); 4174 break; 4175 case 'w': 4176 if (VT == MVT::f16) 4177 return std::make_pair(0U, &AArch64::FPR16RegClass); 4178 else if (VT == MVT::f32) 4179 return std::make_pair(0U, &AArch64::FPR32RegClass); 4180 else if (VT.getSizeInBits() == 64) 4181 return std::make_pair(0U, &AArch64::FPR64RegClass); 4182 else if (VT.getSizeInBits() == 128) 4183 return std::make_pair(0U, &AArch64::FPR128RegClass); 4184 break; 4185 } 4186 } 4187 4188 // Use the default implementation in TargetLowering to convert the register 4189 // constraint into a member of a register class. 4190 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 4191} 4192 4193/// Represent NEON load and store intrinsics as MemIntrinsicNodes. 4194/// The associated MachineMemOperands record the alignment specified 4195/// in the intrinsic calls. 4196bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 4197 const CallInst &I, 4198 unsigned Intrinsic) const { 4199 switch (Intrinsic) { 4200 case Intrinsic::arm_neon_vld1: 4201 case Intrinsic::arm_neon_vld2: 4202 case Intrinsic::arm_neon_vld3: 4203 case Intrinsic::arm_neon_vld4: 4204 case Intrinsic::aarch64_neon_vld1x2: 4205 case Intrinsic::aarch64_neon_vld1x3: 4206 case Intrinsic::aarch64_neon_vld1x4: { 4207 Info.opc = ISD::INTRINSIC_W_CHAIN; 4208 // Conservatively set memVT to the entire set of vectors loaded. 
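 // (For instance, an arm_neon_vld3 returning three <4 x i32> vectors covers
 // 48 bytes, so NumElts == 48 / 8 == 6 below and memVT becomes v6i64.)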
4209 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 4210 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4211 Info.ptrVal = I.getArgOperand(0); 4212 Info.offset = 0; 4213 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4214 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4215 Info.vol = false; // volatile loads with NEON intrinsics not supported 4216 Info.readMem = true; 4217 Info.writeMem = false; 4218 return true; 4219 } 4220 case Intrinsic::arm_neon_vst1: 4221 case Intrinsic::arm_neon_vst2: 4222 case Intrinsic::arm_neon_vst3: 4223 case Intrinsic::arm_neon_vst4: 4224 case Intrinsic::aarch64_neon_vst1x2: 4225 case Intrinsic::aarch64_neon_vst1x3: 4226 case Intrinsic::aarch64_neon_vst1x4: { 4227 Info.opc = ISD::INTRINSIC_VOID; 4228 // Conservatively set memVT to the entire set of vectors stored. 4229 unsigned NumElts = 0; 4230 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 4231 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 4232 if (!ArgTy->isVectorTy()) 4233 break; 4234 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 4235 } 4236 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4237 Info.ptrVal = I.getArgOperand(0); 4238 Info.offset = 0; 4239 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4240 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4241 Info.vol = false; // volatile stores with NEON intrinsics not supported 4242 Info.readMem = false; 4243 Info.writeMem = true; 4244 return true; 4245 } 4246 default: 4247 break; 4248 } 4249 4250 return false; 4251} 4252