AArch64ISelLowering.cpp revision 863c7b48a6672f7074b2e69683fe4259c8c31bd7
//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  if (Subtarget->isTargetLinux())
    return new AArch64LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  llvm_unreachable("unknown subtarget type");
}

AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    // And the vectors
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
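  // For example, (or (and x, maskA), (and y, maskB)) with complementary
  // constant masks can be selected as a BFI bitfield-insert (scalar) or a
  // BSL bit-select (NEON vector).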
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // Legal floating-point operations.
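  // Each of the operations marked Legal below corresponds directly to a
  // single A64 floating-point instruction (FABS, FNEG, FSQRT and the FRINT*
  // rounding family).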
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most
  // cases.
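  // The Custom entries below are lowered to calls into the f128 soft-float
  // support library (for example, an f128 FADD becomes a call to __addtf3).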
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // This prevents LLVM trying to compress double constants into a floating
  // constant-pool entry and trying to load from there. It's of doubtful
  // benefit for A64: we'd need LDR followed by FCVT, I believe.
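  // Accordingly, every floating-point extending load and truncating store is
  // marked for expansion into separate load/convert or convert/store steps.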
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);

    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);

    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);

    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}

// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
// have value type mapped, and they are both being defined as MVT::untyped.
// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
// would fail to figure out the register pressure correctly.
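// This override hands MachineLICM a representative register class and a
// relative cost for the wide QPair/QQuad vector types so it can still
// estimate register pressure for them.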
std::pair<const TargetRegisterClass*, uint8_t>
AArch64TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::v4i64:
    RRC = &AArch64::QPairRegClass;
    Cost = 2;
    break;
  case MVT::v8i64:
    RRC = &AArch64::QQuadRegClass;
    Cost = 4;
    break;
  }
  return std::make_pair(RRC, Cost);
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
  if (BinOpcode) {
    // All arithmetic operations we'll be creating are designed to take an
    // extra shift or extend operand, which we can conveniently set to zero.

    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  // loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  // loop2MBB:
  //   strex stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement them).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.cc IfTrue
  //     b Done
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // IfTrue:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // Done:
  //     ldr qDEST, [sp]
  // [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}


const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC:          return "AArch64ISD::BR_CC";
  case AArch64ISD::Call:           return "AArch64ISD::Call";
  case AArch64ISD::FPMOV:          return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad:        return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI:            return "AArch64ISD::BFI";
  case AArch64ISD::EXTR:           return "AArch64ISD::EXTR";
  case AArch64ISD::Ret:            return "AArch64ISD::Ret";
  case AArch64ISD::SBFX:           return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC:      return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC:          return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN:      return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL:    return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge:   return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall:   return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_UZP1:
    return "AArch64ISD::NEON_UZP1";
  case AArch64ISD::NEON_UZP2:
    return "AArch64ISD::NEON_UZP2";
  case AArch64ISD::NEON_ZIP1:
    return "AArch64ISD::NEON_ZIP1";
  case AArch64ISD::NEON_ZIP2:
    return "AArch64ISD::NEON_ZIP2";
  case AArch64ISD::NEON_TRN1:
    return "AArch64ISD::NEON_TRN1";
  case AArch64ISD::NEON_TRN2:
    return "AArch64ISD::NEON_TRN2";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_LD2DUP:
    return "AArch64ISD::NEON_LD2DUP";
  case AArch64ISD::NEON_LD3DUP:
    return "AArch64ISD::NEON_LD3DUP";
  case AArch64ISD::NEON_LD4DUP:
    return "AArch64ISD::NEON_LD4DUP";
  case AArch64ISD::NEON_LD2DUP_UPD:
    return "AArch64ISD::NEON_LD2DUP_UPD";
  case AArch64ISD::NEON_LD3DUP_UPD:
    return "AArch64ISD::NEON_LD3DUP_UPD";
  case AArch64ISD::NEON_LD4DUP_UPD:
    return "AArch64ISD::NEON_LD4DUP_UPD";
  case AArch64ISD::NEON_LD2LN_UPD:
    return "AArch64ISD::NEON_LD2LN_UPD";
  case AArch64ISD::NEON_LD3LN_UPD:
    return "AArch64ISD::NEON_LD3LN_UPD";
  case AArch64ISD::NEON_LD4LN_UPD:
    return "AArch64ISD::NEON_LD4LN_UPD";
  case AArch64ISD::NEON_ST2LN_UPD:
    return "AArch64ISD::NEON_ST2LN_UPD";
  case AArch64ISD::NEON_ST3LN_UPD:
    return "AArch64ISD::NEON_ST3LN_UPD";
  case AArch64ISD::NEON_ST4LN_UPD:
    return "AArch64ISD::NEON_ST4LN_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";
  default:
    return NULL;
  }
}

static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {

  switch(CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // that class.
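    // Each unallocated FPR argument register takes a full 16-byte (q-register)
    // slot, so the save area here can be up to 8 * 16 = 128 bytes.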
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}


SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
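      // The byval data already lives in the caller-written argument area, so
      // we just create a fixed frame index covering it and hand that address
      // back as the argument value.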
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // !VA.isRegLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                   VA.getValVT(), ArgValue,
                                   DAG.getTargetConstant(DestSubReg, MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }
  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");


    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                    Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                    VA.getLocVT(),
                                    DAG.getUNDEF(VA.getLocVT()),
                                    Arg,
                                    DAG.getTargetConstant(SrcSubReg, MVT::i32)),
                    0);

      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down
      // because we want to copy things into registers as late as possible to
      // avoid register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
1483 SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo, 1484 false, false, 0); 1485 MemOpChains.push_back(Store); 1486 } 1487 } 1488 1489 // The loads and stores generated above shouldn't clash with each 1490 // other. Combining them with this TokenFactor notes that fact for the rest of 1491 // the backend. 1492 if (!MemOpChains.empty()) 1493 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1494 &MemOpChains[0], MemOpChains.size()); 1495 1496 // Most of the rest of the instructions need to be glued together; we don't 1497 // want assignments to actual registers used by a call to be rearranged by a 1498 // well-meaning scheduler. 1499 SDValue InFlag; 1500 1501 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1502 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1503 RegsToPass[i].second, InFlag); 1504 InFlag = Chain.getValue(1); 1505 } 1506 1507 // The linker is responsible for inserting veneers when necessary to put a 1508 // function call destination in range, so we don't need to bother with a 1509 // wrapper here. 1510 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1511 const GlobalValue *GV = G->getGlobal(); 1512 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy()); 1513 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1514 const char *Sym = S->getSymbol(); 1515 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 1516 } 1517 1518 // We don't usually want to end the call-sequence here because we would tidy 1519 // the frame up *after* the call, however in the ABI-changing tail-call case 1520 // we've carefully laid out the parameters so that when sp is reset they'll be 1521 // in the correct location. 1522 if (IsTailCall && !IsSibCall) { 1523 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1524 DAG.getIntPtrConstant(0, true), InFlag, dl); 1525 InFlag = Chain.getValue(1); 1526 } 1527 1528 // We produce the following DAG scheme for the actual call instruction: 1529 // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag? 1530 // 1531 // Most arguments aren't going to be used and just keep the values live as 1532 // far as LLVM is concerned. It's expected to be selected as simply "bl 1533 // callee" (for a direct, non-tail call). 1534 std::vector<SDValue> Ops; 1535 Ops.push_back(Chain); 1536 Ops.push_back(Callee); 1537 1538 if (IsTailCall) { 1539 // Each tail call may have to adjust the stack by a different amount, so 1540 // this information must travel along with the operation for eventual 1541 // consumption by emitEpilogue. 1542 Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32)); 1543 } 1544 1545 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1546 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1547 RegsToPass[i].second.getValueType())); 1548 1549 1550 // Add a register mask operand representing the call-preserved registers. This 1551 // is used later in codegen to constrain register-allocation. 1552 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 1553 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 1554 assert(Mask && "Missing call preserved mask for calling convention"); 1555 Ops.push_back(DAG.getRegisterMask(Mask)); 1556 1557 // If we needed glue, put it in as the last argument. 
1558 if (InFlag.getNode()) 1559 Ops.push_back(InFlag); 1560 1561 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1562 1563 if (IsTailCall) { 1564 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1565 } 1566 1567 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size()); 1568 InFlag = Chain.getValue(1); 1569 1570 // Now we can reclaim the stack, just as well do it before working out where 1571 // our return value is. 1572 if (!IsSibCall) { 1573 uint64_t CalleePopBytes 1574 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0; 1575 1576 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1577 DAG.getIntPtrConstant(CalleePopBytes, true), 1578 InFlag, dl); 1579 InFlag = Chain.getValue(1); 1580 } 1581 1582 return LowerCallResult(Chain, InFlag, CallConv, 1583 IsVarArg, Ins, dl, DAG, InVals); 1584} 1585 1586SDValue 1587AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1588 CallingConv::ID CallConv, bool IsVarArg, 1589 const SmallVectorImpl<ISD::InputArg> &Ins, 1590 SDLoc dl, SelectionDAG &DAG, 1591 SmallVectorImpl<SDValue> &InVals) const { 1592 // Assign locations to each value returned by this call. 1593 SmallVector<CCValAssign, 16> RVLocs; 1594 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 1595 getTargetMachine(), RVLocs, *DAG.getContext()); 1596 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv)); 1597 1598 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1599 CCValAssign VA = RVLocs[i]; 1600 1601 // Return values that are too big to fit into registers should use an sret 1602 // pointer, so this can be a lot simpler than the main argument code. 1603 assert(VA.isRegLoc() && "Memory locations not expected for call return"); 1604 1605 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1606 InFlag); 1607 Chain = Val.getValue(1); 1608 InFlag = Val.getValue(2); 1609 1610 switch (VA.getLocInfo()) { 1611 default: llvm_unreachable("Unknown loc info!"); 1612 case CCValAssign::Full: break; 1613 case CCValAssign::BCvt: 1614 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1615 break; 1616 case CCValAssign::ZExt: 1617 case CCValAssign::SExt: 1618 case CCValAssign::AExt: 1619 // Floating-point arguments only get extended/truncated if they're going 1620 // in memory, so using the integer operation is acceptable here. 1621 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 1622 break; 1623 } 1624 1625 InVals.push_back(Val); 1626 } 1627 1628 return Chain; 1629} 1630 1631bool 1632AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1633 CallingConv::ID CalleeCC, 1634 bool IsVarArg, 1635 bool IsCalleeStructRet, 1636 bool IsCallerStructRet, 1637 const SmallVectorImpl<ISD::OutputArg> &Outs, 1638 const SmallVectorImpl<SDValue> &OutVals, 1639 const SmallVectorImpl<ISD::InputArg> &Ins, 1640 SelectionDAG& DAG) const { 1641 1642 // For CallingConv::C this function knows whether the ABI needs 1643 // changing. That's not true for other conventions so they will have to opt in 1644 // manually. 1645 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1646 return false; 1647 1648 const MachineFunction &MF = DAG.getMachineFunction(); 1649 const Function *CallerF = MF.getFunction(); 1650 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1651 bool CCMatch = CallerCC == CalleeCC; 1652 1653 // Byval parameters hand the function a pointer directly into the stack area 1654 // we want to reuse during a tail call. 
Working around this *is* possible (see 1655 // X86) but less efficient and uglier in LowerCall. 1656 for (Function::const_arg_iterator i = CallerF->arg_begin(), 1657 e = CallerF->arg_end(); i != e; ++i) 1658 if (i->hasByValAttr()) 1659 return false; 1660 1661 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 1662 if (IsTailCallConvention(CalleeCC) && CCMatch) 1663 return true; 1664 return false; 1665 } 1666 1667 // Now we search for cases where we can use a tail call without changing the 1668 // ABI. Sibcall is used in some places (particularly gcc) to refer to this 1669 // concept. 1670 1671 // I want anyone implementing a new calling convention to think long and hard 1672 // about this assert. 1673 assert((!IsVarArg || CalleeCC == CallingConv::C) 1674 && "Unexpected variadic calling convention"); 1675 1676 if (IsVarArg && !Outs.empty()) { 1677 // At least two cases here: if caller is fastcc then we can't have any 1678 // memory arguments (we'd be expected to clean up the stack afterwards). If 1679 // caller is C then we could potentially use its argument area. 1680 1681 // FIXME: for now we take the most conservative of these in both cases: 1682 // disallow all variadic memory operands. 1683 SmallVector<CCValAssign, 16> ArgLocs; 1684 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1685 getTargetMachine(), ArgLocs, *DAG.getContext()); 1686 1687 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1688 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 1689 if (!ArgLocs[i].isRegLoc()) 1690 return false; 1691 } 1692 1693 // If the calling conventions do not match, then we'd better make sure the 1694 // results are returned in the same way as what the caller expects. 1695 if (!CCMatch) { 1696 SmallVector<CCValAssign, 16> RVLocs1; 1697 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1698 getTargetMachine(), RVLocs1, *DAG.getContext()); 1699 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC)); 1700 1701 SmallVector<CCValAssign, 16> RVLocs2; 1702 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1703 getTargetMachine(), RVLocs2, *DAG.getContext()); 1704 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC)); 1705 1706 if (RVLocs1.size() != RVLocs2.size()) 1707 return false; 1708 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1709 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1710 return false; 1711 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1712 return false; 1713 if (RVLocs1[i].isRegLoc()) { 1714 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1715 return false; 1716 } else { 1717 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1718 return false; 1719 } 1720 } 1721 } 1722 1723 // Nothing more to check if the callee is taking no arguments 1724 if (Outs.empty()) 1725 return true; 1726 1727 SmallVector<CCValAssign, 16> ArgLocs; 1728 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1729 getTargetMachine(), ArgLocs, *DAG.getContext()); 1730 1731 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1732 1733 const AArch64MachineFunctionInfo *FuncInfo 1734 = MF.getInfo<AArch64MachineFunctionInfo>(); 1735 1736 // If the stack arguments for this call would fit into our own save area then 1737 // the call can be made tail. 
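  // For example, a function that was itself passed 16 bytes of stack-based
  // arguments (getBytesInStackArgArea() == 16) can tail-call any callee whose
  // outgoing stack arguments need at most 16 bytes (getNextStackOffset() <= 16);
  // a callee needing 24 bytes forces a normal call instead.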
1738 return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea(); 1739} 1740 1741bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, 1742 bool TailCallOpt) const { 1743 return CallCC == CallingConv::Fast && TailCallOpt; 1744} 1745 1746bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const { 1747 return CallCC == CallingConv::Fast; 1748} 1749 1750SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, 1751 SelectionDAG &DAG, 1752 MachineFrameInfo *MFI, 1753 int ClobberedFI) const { 1754 SmallVector<SDValue, 8> ArgChains; 1755 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI); 1756 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1; 1757 1758 // Include the original chain at the beginning of the list. When this is 1759 // used by target LowerCall hooks, this helps legalize find the 1760 // CALLSEQ_BEGIN node. 1761 ArgChains.push_back(Chain); 1762 1763 // Add a chain value for each stack argument corresponding 1764 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(), 1765 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U) 1766 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 1767 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 1768 if (FI->getIndex() < 0) { 1769 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex()); 1770 int64_t InLastByte = InFirstByte; 1771 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1; 1772 1773 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) || 1774 (FirstByte <= InFirstByte && InFirstByte <= LastByte)) 1775 ArgChains.push_back(SDValue(L, 1)); 1776 } 1777 1778 // Build a tokenfactor for all the chains. 1779 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, 1780 &ArgChains[0], ArgChains.size()); 1781} 1782 1783static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) { 1784 switch (CC) { 1785 case ISD::SETEQ: return A64CC::EQ; 1786 case ISD::SETGT: return A64CC::GT; 1787 case ISD::SETGE: return A64CC::GE; 1788 case ISD::SETLT: return A64CC::LT; 1789 case ISD::SETLE: return A64CC::LE; 1790 case ISD::SETNE: return A64CC::NE; 1791 case ISD::SETUGT: return A64CC::HI; 1792 case ISD::SETUGE: return A64CC::HS; 1793 case ISD::SETULT: return A64CC::LO; 1794 case ISD::SETULE: return A64CC::LS; 1795 default: llvm_unreachable("Unexpected condition code"); 1796 } 1797} 1798 1799bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const { 1800 // icmp is implemented using adds/subs immediate, which take an unsigned 1801 // 12-bit immediate, optionally shifted left by 12 bits. 1802 1803 // Symmetric by using adds/subs 1804 if (Val < 0) 1805 Val = -Val; 1806 1807 return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0; 1808} 1809 1810SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS, 1811 ISD::CondCode CC, SDValue &A64cc, 1812 SelectionDAG &DAG, SDLoc &dl) const { 1813 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 1814 int64_t C = 0; 1815 EVT VT = RHSC->getValueType(0); 1816 bool knownInvalid = false; 1817 1818 // I'm not convinced the rest of LLVM handles these edge cases properly, but 1819 // we can at least get it right. 1820 if (isSignedIntSetCC(CC)) { 1821 C = RHSC->getSExtValue(); 1822 } else if (RHSC->getZExtValue() > INT64_MAX) { 1823 // A 64-bit constant not representable by a signed 64-bit integer is far 1824 // too big to fit into a SUBS immediate anyway. 
1825 knownInvalid = true; 1826 } else { 1827 C = RHSC->getZExtValue(); 1828 } 1829 1830 if (!knownInvalid && !isLegalICmpImmediate(C)) { 1831 // Constant does not fit, try adjusting it by one? 1832 switch (CC) { 1833 default: break; 1834 case ISD::SETLT: 1835 case ISD::SETGE: 1836 if (isLegalICmpImmediate(C-1)) { 1837 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 1838 RHS = DAG.getConstant(C-1, VT); 1839 } 1840 break; 1841 case ISD::SETULT: 1842 case ISD::SETUGE: 1843 if (isLegalICmpImmediate(C-1)) { 1844 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 1845 RHS = DAG.getConstant(C-1, VT); 1846 } 1847 break; 1848 case ISD::SETLE: 1849 case ISD::SETGT: 1850 if (isLegalICmpImmediate(C+1)) { 1851 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 1852 RHS = DAG.getConstant(C+1, VT); 1853 } 1854 break; 1855 case ISD::SETULE: 1856 case ISD::SETUGT: 1857 if (isLegalICmpImmediate(C+1)) { 1858 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 1859 RHS = DAG.getConstant(C+1, VT); 1860 } 1861 break; 1862 } 1863 } 1864 } 1865 1866 A64CC::CondCodes CondCode = IntCCToA64CC(CC); 1867 A64cc = DAG.getConstant(CondCode, MVT::i32); 1868 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1869 DAG.getCondCode(CC)); 1870} 1871 1872static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, 1873 A64CC::CondCodes &Alternative) { 1874 A64CC::CondCodes CondCode = A64CC::Invalid; 1875 Alternative = A64CC::Invalid; 1876 1877 switch (CC) { 1878 default: llvm_unreachable("Unknown FP condition!"); 1879 case ISD::SETEQ: 1880 case ISD::SETOEQ: CondCode = A64CC::EQ; break; 1881 case ISD::SETGT: 1882 case ISD::SETOGT: CondCode = A64CC::GT; break; 1883 case ISD::SETGE: 1884 case ISD::SETOGE: CondCode = A64CC::GE; break; 1885 case ISD::SETOLT: CondCode = A64CC::MI; break; 1886 case ISD::SETOLE: CondCode = A64CC::LS; break; 1887 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break; 1888 case ISD::SETO: CondCode = A64CC::VC; break; 1889 case ISD::SETUO: CondCode = A64CC::VS; break; 1890 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break; 1891 case ISD::SETUGT: CondCode = A64CC::HI; break; 1892 case ISD::SETUGE: CondCode = A64CC::PL; break; 1893 case ISD::SETLT: 1894 case ISD::SETULT: CondCode = A64CC::LT; break; 1895 case ISD::SETLE: 1896 case ISD::SETULE: CondCode = A64CC::LE; break; 1897 case ISD::SETNE: 1898 case ISD::SETUNE: CondCode = A64CC::NE; break; 1899 } 1900 return CondCode; 1901} 1902 1903SDValue 1904AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 1905 SDLoc DL(Op); 1906 EVT PtrVT = getPointerTy(); 1907 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1908 1909 switch(getTargetMachine().getCodeModel()) { 1910 case CodeModel::Small: 1911 // The most efficient code is PC-relative anyway for the small memory model, 1912 // so we don't need to worry about relocation model. 
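    // The WrapperSmall node below is expected to select to the usual
    // small-model ADRP/ADD pair, roughly (label name illustrative):
    //   adrp x0, .Ltmp0
    //   add x0, x0, #:lo12:.Ltmp0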
1913 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 1914 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1915 AArch64II::MO_NO_FLAG), 1916 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1917 AArch64II::MO_LO12), 1918 DAG.getConstant(/*Alignment=*/ 4, MVT::i32)); 1919 case CodeModel::Large: 1920 return DAG.getNode( 1921 AArch64ISD::WrapperLarge, DL, PtrVT, 1922 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3), 1923 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 1924 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 1925 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 1926 default: 1927 llvm_unreachable("Only small and large code models supported now"); 1928 } 1929} 1930 1931 1932// (BRCOND chain, val, dest) 1933SDValue 1934AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 1935 SDLoc dl(Op); 1936 SDValue Chain = Op.getOperand(0); 1937 SDValue TheBit = Op.getOperand(1); 1938 SDValue DestBB = Op.getOperand(2); 1939 1940 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 1941 // that as the consumer we are responsible for ignoring rubbish in higher 1942 // bits. 1943 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 1944 DAG.getConstant(1, MVT::i32)); 1945 1946 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 1947 DAG.getConstant(0, TheBit.getValueType()), 1948 DAG.getCondCode(ISD::SETNE)); 1949 1950 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain, 1951 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32), 1952 DestBB); 1953} 1954 1955// (BR_CC chain, condcode, lhs, rhs, dest) 1956SDValue 1957AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1958 SDLoc dl(Op); 1959 SDValue Chain = Op.getOperand(0); 1960 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1961 SDValue LHS = Op.getOperand(2); 1962 SDValue RHS = Op.getOperand(3); 1963 SDValue DestBB = Op.getOperand(4); 1964 1965 if (LHS.getValueType() == MVT::f128) { 1966 // f128 comparisons are lowered to runtime calls by a routine which sets 1967 // LHS, RHS and CC appropriately for the rest of this function to continue. 1968 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 1969 1970 // If softenSetCCOperands returned a scalar, we need to compare the result 1971 // against zero to select between true and false values. 1972 if (RHS.getNode() == 0) { 1973 RHS = DAG.getConstant(0, LHS.getValueType()); 1974 CC = ISD::SETNE; 1975 } 1976 } 1977 1978 if (LHS.getValueType().isInteger()) { 1979 SDValue A64cc; 1980 1981 // Integers are handled in a separate function because the combinations of 1982 // immediates and tests can get hairy and we may want to fiddle things. 1983 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 1984 1985 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1986 Chain, CmpOp, A64cc, DestBB); 1987 } 1988 1989 // Note that some LLVM floating-point CondCodes can't be lowered to a single 1990 // conditional branch, hence FPCCToA64CC can set a second test, where either 1991 // passing is sufficient. 
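  // For example, SETONE maps to A64CC::MI with A64CC::GT as the alternative
  // (see FPCCToA64CC above), so a single FCMP ends up feeding two conditional
  // branches to the same destination, roughly "b.mi dest; b.gt dest".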
1992 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 1993 CondCode = FPCCToA64CC(CC, Alternative); 1994 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 1995 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1996 DAG.getCondCode(CC)); 1997 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1998 Chain, SetCC, A64cc, DestBB); 1999 2000 if (Alternative != A64CC::Invalid) { 2001 A64cc = DAG.getConstant(Alternative, MVT::i32); 2002 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 2003 A64BR_CC, SetCC, A64cc, DestBB); 2004 2005 } 2006 2007 return A64BR_CC; 2008} 2009 2010SDValue 2011AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG, 2012 RTLIB::Libcall Call) const { 2013 ArgListTy Args; 2014 ArgListEntry Entry; 2015 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) { 2016 EVT ArgVT = Op.getOperand(i).getValueType(); 2017 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2018 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy; 2019 Entry.isSExt = false; 2020 Entry.isZExt = false; 2021 Args.push_back(Entry); 2022 } 2023 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy()); 2024 2025 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 2026 2027 // By default, the input chain to this libcall is the entry node of the 2028 // function. If the libcall is going to be emitted as a tail call then 2029 // isUsedByReturnOnly will change it to the right chain if the return 2030 // node which is being folded has a non-entry input chain. 2031 SDValue InChain = DAG.getEntryNode(); 2032 2033 // isTailCall may be true since the callee does not reference caller stack 2034 // frame. Check if it's in the right position. 2035 SDValue TCChain = InChain; 2036 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain); 2037 if (isTailCall) 2038 InChain = TCChain; 2039 2040 TargetLowering:: 2041 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false, 2042 0, getLibcallCallingConv(Call), isTailCall, 2043 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2044 Callee, Args, DAG, SDLoc(Op)); 2045 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2046 2047 if (!CallInfo.second.getNode()) 2048 // It's a tailcall, return the chain (which is the DAG root). 
2049 return DAG.getRoot(); 2050 2051 return CallInfo.first; 2052} 2053 2054SDValue 2055AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2056 if (Op.getOperand(0).getValueType() != MVT::f128) { 2057 // It's legal except when f128 is involved 2058 return Op; 2059 } 2060 2061 RTLIB::Libcall LC; 2062 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 2063 2064 SDValue SrcVal = Op.getOperand(0); 2065 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, 2066 /*isSigned*/ false, SDLoc(Op)).first; 2067} 2068 2069SDValue 2070AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 2071 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); 2072 2073 RTLIB::Libcall LC; 2074 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 2075 2076 return LowerF128ToCall(Op, DAG, LC); 2077} 2078 2079SDValue 2080AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 2081 bool IsSigned) const { 2082 if (Op.getOperand(0).getValueType() != MVT::f128) { 2083 // It's legal except when f128 is involved 2084 return Op; 2085 } 2086 2087 RTLIB::Libcall LC; 2088 if (IsSigned) 2089 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2090 else 2091 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2092 2093 return LowerF128ToCall(Op, DAG, LC); 2094} 2095 2096SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2097 MachineFunction &MF = DAG.getMachineFunction(); 2098 MachineFrameInfo *MFI = MF.getFrameInfo(); 2099 MFI->setReturnAddressIsTaken(true); 2100 2101 EVT VT = Op.getValueType(); 2102 SDLoc dl(Op); 2103 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2104 if (Depth) { 2105 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2106 SDValue Offset = DAG.getConstant(8, MVT::i64); 2107 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2108 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2109 MachinePointerInfo(), false, false, false, 0); 2110 } 2111 2112 // Return X30, which contains the return address. Mark it an implicit live-in. 
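  // (In the non-zero Depth case above, the load at FrameAddr + 8 relies on the
  // frame record being laid out as {previous X29, return address}; for Depth
  // == 0 the return address is still live in X30 on entry, as used below.)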
2113 unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64)); 2114 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64); 2115} 2116 2117 2118SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) 2119 const { 2120 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2121 MFI->setFrameAddressIsTaken(true); 2122 2123 EVT VT = Op.getValueType(); 2124 SDLoc dl(Op); 2125 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2126 unsigned FrameReg = AArch64::X29; 2127 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2128 while (Depth--) 2129 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2130 MachinePointerInfo(), 2131 false, false, false, 0); 2132 return FrameAddr; 2133} 2134 2135SDValue 2136AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, 2137 SelectionDAG &DAG) const { 2138 assert(getTargetMachine().getCodeModel() == CodeModel::Large); 2139 assert(getTargetMachine().getRelocationModel() == Reloc::Static); 2140 2141 EVT PtrVT = getPointerTy(); 2142 SDLoc dl(Op); 2143 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2144 const GlobalValue *GV = GN->getGlobal(); 2145 2146 SDValue GlobalAddr = DAG.getNode( 2147 AArch64ISD::WrapperLarge, dl, PtrVT, 2148 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3), 2149 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 2150 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 2151 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 2152 2153 if (GN->getOffset() != 0) 2154 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2155 DAG.getConstant(GN->getOffset(), PtrVT)); 2156 2157 return GlobalAddr; 2158} 2159 2160SDValue 2161AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op, 2162 SelectionDAG &DAG) const { 2163 assert(getTargetMachine().getCodeModel() == CodeModel::Small); 2164 2165 EVT PtrVT = getPointerTy(); 2166 SDLoc dl(Op); 2167 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2168 const GlobalValue *GV = GN->getGlobal(); 2169 unsigned Alignment = GV->getAlignment(); 2170 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2171 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) { 2172 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate 2173 // to zero when they remain undefined. In PIC mode the GOT can take care of 2174 // this, but in absolute mode we use a constant pool load. 
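    // The nodes built below materialise the address of a literal-pool entry
    // holding the global's address and then load through it, roughly (sketch;
    // label name illustrative):
    //   adrp x0, .LCPI0_0
    //   ldr x0, [x0, #:lo12:.LCPI0_0]
    // so a weak symbol that stays undefined correctly yields a null pointer.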
2175 SDValue PoolAddr; 2176 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2177 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2178 AArch64II::MO_NO_FLAG), 2179 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2180 AArch64II::MO_LO12), 2181 DAG.getConstant(8, MVT::i32)); 2182 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr, 2183 MachinePointerInfo::getConstantPool(), 2184 /*isVolatile=*/ false, 2185 /*isNonTemporal=*/ true, 2186 /*isInvariant=*/ true, 8); 2187 if (GN->getOffset() != 0) 2188 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2189 DAG.getConstant(GN->getOffset(), PtrVT)); 2190 2191 return GlobalAddr; 2192 } 2193 2194 if (Alignment == 0) { 2195 const PointerType *GVPtrTy = cast<PointerType>(GV->getType()); 2196 if (GVPtrTy->getElementType()->isSized()) { 2197 Alignment 2198 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType()); 2199 } else { 2200 // Be conservative if we can't guess, not that it really matters: 2201 // functions and labels aren't valid for loads, and the methods used to 2202 // actually calculate an address work with any alignment. 2203 Alignment = 1; 2204 } 2205 } 2206 2207 unsigned char HiFixup, LoFixup; 2208 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM); 2209 2210 if (UseGOT) { 2211 HiFixup = AArch64II::MO_GOT; 2212 LoFixup = AArch64II::MO_GOT_LO12; 2213 Alignment = 8; 2214 } else { 2215 HiFixup = AArch64II::MO_NO_FLAG; 2216 LoFixup = AArch64II::MO_LO12; 2217 } 2218 2219 // AArch64's small model demands the following sequence: 2220 // ADRP x0, somewhere 2221 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly). 2222 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2223 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2224 HiFixup), 2225 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2226 LoFixup), 2227 DAG.getConstant(Alignment, MVT::i32)); 2228 2229 if (UseGOT) { 2230 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(), 2231 GlobalRef); 2232 } 2233 2234 if (GN->getOffset() != 0) 2235 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef, 2236 DAG.getConstant(GN->getOffset(), PtrVT)); 2237 2238 return GlobalRef; 2239} 2240 2241SDValue 2242AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, 2243 SelectionDAG &DAG) const { 2244 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so 2245 // we make those distinctions here. 2246 2247 switch (getTargetMachine().getCodeModel()) { 2248 case CodeModel::Small: 2249 return LowerGlobalAddressELFSmall(Op, DAG); 2250 case CodeModel::Large: 2251 return LowerGlobalAddressELFLarge(Op, DAG); 2252 default: 2253 llvm_unreachable("Only small and large code models supported now"); 2254 } 2255} 2256 2257SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, 2258 SDValue DescAddr, 2259 SDLoc DL, 2260 SelectionDAG &DAG) const { 2261 EVT PtrVT = getPointerTy(); 2262 2263 // The function we need to call is simply the first entry in the GOT for this 2264 // descriptor, load it in preparation. 2265 SDValue Func, Chain; 2266 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2267 DescAddr); 2268 2269 // The function takes only one argument: the address of the descriptor itself 2270 // in X0. 2271 SDValue Glue; 2272 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue); 2273 Glue = Chain.getValue(1); 2274 2275 // Finally, there's a special calling-convention which means that the lookup 2276 // must preserve all registers (except X0, obviously). 
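  // Together with the descriptor address computed by our caller, this models
  // the standard TLSDESC call sequence, roughly (sketch):
  //   adrp x0, :tlsdesc:var
  //   ldr x1, [x0, #:tlsdesc_lo12:var]
  //   add x0, x0, #:tlsdesc_lo12:var
  //   blr x1
  // and the resolver must preserve every register except X0, hence the
  // special call-preserved mask fetched below.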
2277 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2278 const AArch64RegisterInfo *A64RI 2279 = static_cast<const AArch64RegisterInfo *>(TRI); 2280 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask(); 2281 2282 // We're now ready to populate the argument list, as with a normal call: 2283 std::vector<SDValue> Ops; 2284 Ops.push_back(Chain); 2285 Ops.push_back(Func); 2286 Ops.push_back(SymAddr); 2287 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT)); 2288 Ops.push_back(DAG.getRegisterMask(Mask)); 2289 Ops.push_back(Glue); 2290 2291 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2292 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], 2293 Ops.size()); 2294 Glue = Chain.getValue(1); 2295 2296 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it 2297 // back to the generic handling code. 2298 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); 2299} 2300 2301SDValue 2302AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, 2303 SelectionDAG &DAG) const { 2304 assert(getSubtarget()->isTargetELF() && 2305 "TLS not implemented for non-ELF targets"); 2306 assert(getTargetMachine().getCodeModel() == CodeModel::Small 2307 && "TLS only supported in small memory model"); 2308 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2309 2310 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); 2311 2312 SDValue TPOff; 2313 EVT PtrVT = getPointerTy(); 2314 SDLoc DL(Op); 2315 const GlobalValue *GV = GA->getGlobal(); 2316 2317 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); 2318 2319 if (Model == TLSModel::InitialExec) { 2320 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2321 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2322 AArch64II::MO_GOTTPREL), 2323 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2324 AArch64II::MO_GOTTPREL_LO12), 2325 DAG.getConstant(8, MVT::i32)); 2326 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2327 TPOff); 2328 } else if (Model == TLSModel::LocalExec) { 2329 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2330 AArch64II::MO_TPREL_G1); 2331 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2332 AArch64II::MO_TPREL_G0_NC); 2333 2334 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2335 DAG.getTargetConstant(1, MVT::i32)), 0); 2336 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2337 TPOff, LoVar, 2338 DAG.getTargetConstant(0, MVT::i32)), 0); 2339 } else if (Model == TLSModel::GeneralDynamic) { 2340 // Accesses used in this sequence go via the TLS descriptor which lives in 2341 // the GOT. Prepare an address we can use to handle this. 2342 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2343 AArch64II::MO_TLSDESC); 2344 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2345 AArch64II::MO_TLSDESC_LO12); 2346 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2347 HiDesc, LoDesc, 2348 DAG.getConstant(8, MVT::i32)); 2349 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0); 2350 2351 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2352 } else if (Model == TLSModel::LocalDynamic) { 2353 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS 2354 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate 2355 // the beginning of the module's TLS region, followed by a DTPREL offset 2356 // calculation. 
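    // Concretely (sketch): phase one is the same TLSDESC call as the
    // general-dynamic case, just made against _TLS_MODULE_BASE_; phase two
    // materialises the variable's offset within the module's TLS block with a
    // MOVZ/MOVK pair using the :dtprel_g1:/:dtprel_g0_nc: operands set up
    // below.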
2357 2358 // These accesses will need deduplicating if there's more than one. 2359 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction() 2360 .getInfo<AArch64MachineFunctionInfo>(); 2361 MFI->incNumLocalDynamicTLSAccesses(); 2362 2363 2364 // Get the location of _TLS_MODULE_BASE_: 2365 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2366 AArch64II::MO_TLSDESC); 2367 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2368 AArch64II::MO_TLSDESC_LO12); 2369 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2370 HiDesc, LoDesc, 2371 DAG.getConstant(8, MVT::i32)); 2372 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); 2373 2374 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2375 2376 // Get the variable's offset from _TLS_MODULE_BASE_ 2377 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2378 AArch64II::MO_DTPREL_G1); 2379 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2380 AArch64II::MO_DTPREL_G0_NC); 2381 2382 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2383 DAG.getTargetConstant(0, MVT::i32)), 0); 2384 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2385 TPOff, LoVar, 2386 DAG.getTargetConstant(0, MVT::i32)), 0); 2387 } else 2388 llvm_unreachable("Unsupported TLS access model"); 2389 2390 2391 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); 2392} 2393 2394SDValue 2395AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2396 bool IsSigned) const { 2397 if (Op.getValueType() != MVT::f128) { 2398 // Legal for everything except f128. 2399 return Op; 2400 } 2401 2402 RTLIB::Libcall LC; 2403 if (IsSigned) 2404 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2405 else 2406 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2407 2408 return LowerF128ToCall(Op, DAG, LC); 2409} 2410 2411 2412SDValue 2413AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2414 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2415 SDLoc dl(JT); 2416 EVT PtrVT = getPointerTy(); 2417 2418 // When compiling PIC, jump tables get put in the code section so a static 2419 // relocation-style is acceptable for both cases. 
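  // For the small model the wrapper below should therefore become, roughly
  // (label purely illustrative):
  //   adrp xN, .LJTI0_0
  //   add xN, xN, #:lo12:.LJTI0_0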
2420 switch (getTargetMachine().getCodeModel()) { 2421 case CodeModel::Small: 2422 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2423 DAG.getTargetJumpTable(JT->getIndex(), PtrVT), 2424 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2425 AArch64II::MO_LO12), 2426 DAG.getConstant(1, MVT::i32)); 2427 case CodeModel::Large: 2428 return DAG.getNode( 2429 AArch64ISD::WrapperLarge, dl, PtrVT, 2430 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3), 2431 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC), 2432 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC), 2433 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC)); 2434 default: 2435 llvm_unreachable("Only small and large code models supported now"); 2436 } 2437} 2438 2439// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) 2440SDValue 2441AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2442 SDLoc dl(Op); 2443 SDValue LHS = Op.getOperand(0); 2444 SDValue RHS = Op.getOperand(1); 2445 SDValue IfTrue = Op.getOperand(2); 2446 SDValue IfFalse = Op.getOperand(3); 2447 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2448 2449 if (LHS.getValueType() == MVT::f128) { 2450 // f128 comparisons are lowered to libcalls, but slot in nicely here 2451 // afterwards. 2452 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2453 2454 // If softenSetCCOperands returned a scalar, we need to compare the result 2455 // against zero to select between true and false values. 2456 if (RHS.getNode() == 0) { 2457 RHS = DAG.getConstant(0, LHS.getValueType()); 2458 CC = ISD::SETNE; 2459 } 2460 } 2461 2462 if (LHS.getValueType().isInteger()) { 2463 SDValue A64cc; 2464 2465 // Integers are handled in a separate function because the combinations of 2466 // immediates and tests can get hairy and we may want to fiddle things. 2467 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2468 2469 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2470 CmpOp, IfTrue, IfFalse, A64cc); 2471 } 2472 2473 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2474 // conditional branch, hence FPCCToA64CC can set a second test, where either 2475 // passing is sufficient. 2476 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2477 CondCode = FPCCToA64CC(CC, Alternative); 2478 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2479 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2480 DAG.getCondCode(CC)); 2481 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, 2482 Op.getValueType(), 2483 SetCC, IfTrue, IfFalse, A64cc); 2484 2485 if (Alternative != A64CC::Invalid) { 2486 A64cc = DAG.getConstant(Alternative, MVT::i32); 2487 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2488 SetCC, IfTrue, A64SELECT_CC, A64cc); 2489 2490 } 2491 2492 return A64SELECT_CC; 2493} 2494 2495// (SELECT testbit, iftrue, iffalse) 2496SDValue 2497AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2498 SDLoc dl(Op); 2499 SDValue TheBit = Op.getOperand(0); 2500 SDValue IfTrue = Op.getOperand(1); 2501 SDValue IfFalse = Op.getOperand(2); 2502 2503 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 2504 // that as the consumer we are responsible for ignoring rubbish in higher 2505 // bits. 
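  // In effect (select i1 %b, %t, %f) is lowered as if it had been written
  // (select_cc (and %b, 1), 0, %t, %f, setne), which is what the nodes below
  // build using the AArch64-specific SETCC and SELECT_CC operations.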
2506 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 2507 DAG.getConstant(1, MVT::i32)); 2508 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 2509 DAG.getConstant(0, TheBit.getValueType()), 2510 DAG.getCondCode(ISD::SETNE)); 2511 2512 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2513 A64CMP, IfTrue, IfFalse, 2514 DAG.getConstant(A64CC::NE, MVT::i32)); 2515} 2516 2517static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { 2518 SDLoc DL(Op); 2519 SDValue LHS = Op.getOperand(0); 2520 SDValue RHS = Op.getOperand(1); 2521 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2522 EVT VT = Op.getValueType(); 2523 bool Invert = false; 2524 SDValue Op0, Op1; 2525 unsigned Opcode; 2526 2527 if (LHS.getValueType().isInteger()) { 2528 2529 // Attempt to use Vector Integer Compare Mask Test instruction. 2530 // TST = icmp ne (and (op0, op1), zero). 2531 if (CC == ISD::SETNE) { 2532 if (((LHS.getOpcode() == ISD::AND) && 2533 ISD::isBuildVectorAllZeros(RHS.getNode())) || 2534 ((RHS.getOpcode() == ISD::AND) && 2535 ISD::isBuildVectorAllZeros(LHS.getNode()))) { 2536 2537 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; 2538 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); 2539 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); 2540 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); 2541 } 2542 } 2543 2544 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). 2545 // Note: Compare against Zero does not support unsigned predicates. 2546 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || 2547 ISD::isBuildVectorAllZeros(LHS.getNode())) && 2548 !isUnsignedIntSetCC(CC)) { 2549 2550 // If LHS is the zero value, swap operands and CondCode. 2551 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2552 CC = getSetCCSwappedOperands(CC); 2553 Op0 = RHS; 2554 } else 2555 Op0 = LHS; 2556 2557 // Ensure valid CondCode for Compare Mask against Zero instruction: 2558 // EQ, GE, GT, LE, LT. 2559 if (ISD::SETNE == CC) { 2560 Invert = true; 2561 CC = ISD::SETEQ; 2562 } 2563 2564 // Using constant type to differentiate integer and FP compares with zero. 2565 Op1 = DAG.getConstant(0, MVT::i32); 2566 Opcode = AArch64ISD::NEON_CMPZ; 2567 2568 } else { 2569 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned). 2570 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT. 2571 bool Swap = false; 2572 switch (CC) { 2573 default: 2574 llvm_unreachable("Illegal integer comparison."); 2575 case ISD::SETEQ: 2576 case ISD::SETGT: 2577 case ISD::SETGE: 2578 case ISD::SETUGT: 2579 case ISD::SETUGE: 2580 break; 2581 case ISD::SETNE: 2582 Invert = true; 2583 CC = ISD::SETEQ; 2584 break; 2585 case ISD::SETULT: 2586 case ISD::SETULE: 2587 case ISD::SETLT: 2588 case ISD::SETLE: 2589 Swap = true; 2590 CC = getSetCCSwappedOperands(CC); 2591 } 2592 2593 if (Swap) 2594 std::swap(LHS, RHS); 2595 2596 Opcode = AArch64ISD::NEON_CMP; 2597 Op0 = LHS; 2598 Op1 = RHS; 2599 } 2600 2601 // Generate Compare Mask instr or Compare Mask against Zero instr. 2602 SDValue NeonCmp = 2603 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2604 2605 if (Invert) 2606 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2607 2608 return NeonCmp; 2609 } 2610 2611 // Now handle Floating Point cases. 2612 // Attempt to use Vector Floating Point Compare Mask against Zero instruction. 
2613 if (ISD::isBuildVectorAllZeros(RHS.getNode()) || 2614 ISD::isBuildVectorAllZeros(LHS.getNode())) { 2615 2616 // If LHS is the zero value, swap operands and CondCode. 2617 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2618 CC = getSetCCSwappedOperands(CC); 2619 Op0 = RHS; 2620 } else 2621 Op0 = LHS; 2622 2623 // Using constant type to differentiate integer and FP compares with zero. 2624 Op1 = DAG.getConstantFP(0, MVT::f32); 2625 Opcode = AArch64ISD::NEON_CMPZ; 2626 } else { 2627 // Attempt to use Vector Floating Point Compare Mask instruction. 2628 Op0 = LHS; 2629 Op1 = RHS; 2630 Opcode = AArch64ISD::NEON_CMP; 2631 } 2632 2633 SDValue NeonCmpAlt; 2634 // Some register compares have to be implemented with swapped CC and operands, 2635 // e.g.: OLT implemented as OGT with swapped operands. 2636 bool SwapIfRegArgs = false; 2637 2638 // Ensure valid CondCode for FP Compare Mask against Zero instruction: 2639 // EQ, GE, GT, LE, LT. 2640 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT. 2641 switch (CC) { 2642 default: 2643 llvm_unreachable("Illegal FP comparison"); 2644 case ISD::SETUNE: 2645 case ISD::SETNE: 2646 Invert = true; // Fallthrough 2647 case ISD::SETOEQ: 2648 case ISD::SETEQ: 2649 CC = ISD::SETEQ; 2650 break; 2651 case ISD::SETOLT: 2652 case ISD::SETLT: 2653 CC = ISD::SETLT; 2654 SwapIfRegArgs = true; 2655 break; 2656 case ISD::SETOGT: 2657 case ISD::SETGT: 2658 CC = ISD::SETGT; 2659 break; 2660 case ISD::SETOLE: 2661 case ISD::SETLE: 2662 CC = ISD::SETLE; 2663 SwapIfRegArgs = true; 2664 break; 2665 case ISD::SETOGE: 2666 case ISD::SETGE: 2667 CC = ISD::SETGE; 2668 break; 2669 case ISD::SETUGE: 2670 Invert = true; 2671 CC = ISD::SETLT; 2672 SwapIfRegArgs = true; 2673 break; 2674 case ISD::SETULE: 2675 Invert = true; 2676 CC = ISD::SETGT; 2677 break; 2678 case ISD::SETUGT: 2679 Invert = true; 2680 CC = ISD::SETLE; 2681 SwapIfRegArgs = true; 2682 break; 2683 case ISD::SETULT: 2684 Invert = true; 2685 CC = ISD::SETGE; 2686 break; 2687 case ISD::SETUEQ: 2688 Invert = true; // Fallthrough 2689 case ISD::SETONE: 2690 // Expand this to (OGT |OLT). 2691 NeonCmpAlt = 2692 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT)); 2693 CC = ISD::SETLT; 2694 SwapIfRegArgs = true; 2695 break; 2696 case ISD::SETUO: 2697 Invert = true; // Fallthrough 2698 case ISD::SETO: 2699 // Expand this to (OGE | OLT). 
2700 NeonCmpAlt =
2701 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
2702 CC = ISD::SETLT;
2703 SwapIfRegArgs = true;
2704 break;
2705 }
2706
2707 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
2708 CC = getSetCCSwappedOperands(CC);
2709 std::swap(Op0, Op1);
2710 }
2711
2712 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
2713 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2714
2715 if (NeonCmpAlt.getNode())
2716 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
2717
2718 if (Invert)
2719 NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
2720
2721 return NeonCmp;
2722}
2723
2724// (SETCC lhs, rhs, condcode)
2725SDValue
2726AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2727 SDLoc dl(Op);
2728 SDValue LHS = Op.getOperand(0);
2729 SDValue RHS = Op.getOperand(1);
2730 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2731 EVT VT = Op.getValueType();
2732
2733 if (VT.isVector())
2734 return LowerVectorSETCC(Op, DAG);
2735
2736 if (LHS.getValueType() == MVT::f128) {
2737 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2738 // for the rest of the function (some i32 or i64 values).
2739 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2740
2741 // If softenSetCCOperands returned a scalar, use it.
2742 if (RHS.getNode() == 0) {
2743 assert(LHS.getValueType() == Op.getValueType() &&
2744 "Unexpected setcc expansion!");
2745 return LHS;
2746 }
2747 }
2748
2749 if (LHS.getValueType().isInteger()) {
2750 SDValue A64cc;
2751
2752 // Integers are handled in a separate function because the combinations of
2753 // immediates and tests can get hairy and we may want to fiddle things.
2754 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2755
2756 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2757 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
2758 A64cc);
2759 }
2760
2761 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2762 // conditional branch, hence FPCCToA64CC can set a second test, where either
2763 // passing is sufficient.
2764 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2765 CondCode = FPCCToA64CC(CC, Alternative);
2766 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2767 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2768 DAG.getCondCode(CC));
2769 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2770 CmpOp, DAG.getConstant(1, VT),
2771 DAG.getConstant(0, VT), A64cc);
2772
2773 if (Alternative != A64CC::Invalid) {
2774 A64cc = DAG.getConstant(Alternative, MVT::i32);
2775 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
2776 DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
2777 }
2778
2779 return A64SELECT_CC;
2780}
2781
2782SDValue
2783AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2784 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2785 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2786
2787 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2788 // rather than just 8.
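  // For reference, the structure being copied is the AAPCS64 va_list, with
  // the same field offsets that LowerVASTART below writes:
  //   struct va_list {
  //     void *__stack; // offset 0
  //     void *__gr_top; // offset 8
  //     void *__vr_top; // offset 16
  //     int __gr_offs; // offset 24
  //     int __vr_offs; // offset 28
  //   };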
2789 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), 2790 Op.getOperand(1), Op.getOperand(2), 2791 DAG.getConstant(32, MVT::i32), 8, false, false, 2792 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); 2793} 2794 2795SDValue 2796AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2797 // The layout of the va_list struct is specified in the AArch64 Procedure Call 2798 // Standard, section B.3. 2799 MachineFunction &MF = DAG.getMachineFunction(); 2800 AArch64MachineFunctionInfo *FuncInfo 2801 = MF.getInfo<AArch64MachineFunctionInfo>(); 2802 SDLoc DL(Op); 2803 2804 SDValue Chain = Op.getOperand(0); 2805 SDValue VAList = Op.getOperand(1); 2806 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2807 SmallVector<SDValue, 4> MemOps; 2808 2809 // void *__stack at offset 0 2810 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(), 2811 getPointerTy()); 2812 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, 2813 MachinePointerInfo(SV), false, false, 0)); 2814 2815 // void *__gr_top at offset 8 2816 int GPRSize = FuncInfo->getVariadicGPRSize(); 2817 if (GPRSize > 0) { 2818 SDValue GRTop, GRTopAddr; 2819 2820 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2821 DAG.getConstant(8, getPointerTy())); 2822 2823 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy()); 2824 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop, 2825 DAG.getConstant(GPRSize, getPointerTy())); 2826 2827 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, 2828 MachinePointerInfo(SV, 8), 2829 false, false, 0)); 2830 } 2831 2832 // void *__vr_top at offset 16 2833 int FPRSize = FuncInfo->getVariadicFPRSize(); 2834 if (FPRSize > 0) { 2835 SDValue VRTop, VRTopAddr; 2836 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2837 DAG.getConstant(16, getPointerTy())); 2838 2839 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy()); 2840 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop, 2841 DAG.getConstant(FPRSize, getPointerTy())); 2842 2843 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, 2844 MachinePointerInfo(SV, 16), 2845 false, false, 0)); 2846 } 2847 2848 // int __gr_offs at offset 24 2849 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2850 DAG.getConstant(24, getPointerTy())); 2851 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32), 2852 GROffsAddr, MachinePointerInfo(SV, 24), 2853 false, false, 0)); 2854 2855 // int __vr_offs at offset 28 2856 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2857 DAG.getConstant(28, getPointerTy())); 2858 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32), 2859 VROffsAddr, MachinePointerInfo(SV, 28), 2860 false, false, 0)); 2861 2862 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], 2863 MemOps.size()); 2864} 2865 2866SDValue 2867AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2868 switch (Op.getOpcode()) { 2869 default: llvm_unreachable("Don't know how to custom lower this!"); 2870 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128); 2871 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128); 2872 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128); 2873 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128); 2874 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true); 2875 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false); 2876 case 
ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true); 2877 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false); 2878 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 2879 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 2880 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 2881 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 2882 2883 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2884 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 2885 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 2886 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG); 2887 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2888 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2889 case ISD::SELECT: return LowerSELECT(Op, DAG); 2890 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2891 case ISD::SETCC: return LowerSETCC(Op, DAG); 2892 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 2893 case ISD::VASTART: return LowerVASTART(Op, DAG); 2894 case ISD::BUILD_VECTOR: 2895 return LowerBUILD_VECTOR(Op, DAG, getSubtarget()); 2896 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2897 } 2898 2899 return SDValue(); 2900} 2901 2902/// Check if the specified splat value corresponds to a valid vector constant 2903/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If 2904/// so, return the encoded 8-bit immediate and the OpCmode instruction fields 2905/// values. 2906static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 2907 unsigned SplatBitSize, SelectionDAG &DAG, 2908 bool is128Bits, NeonModImmType type, EVT &VT, 2909 unsigned &Imm, unsigned &OpCmode) { 2910 switch (SplatBitSize) { 2911 default: 2912 llvm_unreachable("unexpected size for isNeonModifiedImm"); 2913 case 8: { 2914 if (type != Neon_Mov_Imm) 2915 return false; 2916 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 2917 // Neon movi per byte: Op=0, Cmode=1110. 2918 OpCmode = 0xe; 2919 Imm = SplatBits; 2920 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 2921 break; 2922 } 2923 case 16: { 2924 // Neon move inst per halfword 2925 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 2926 if ((SplatBits & ~0xff) == 0) { 2927 // Value = 0x00nn is 0x00nn LSL 0 2928 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000 2929 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001 2930 // Op=x, Cmode=100y 2931 Imm = SplatBits; 2932 OpCmode = 0x8; 2933 break; 2934 } 2935 if ((SplatBits & ~0xff00) == 0) { 2936 // Value = 0xnn00 is 0x00nn LSL 8 2937 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010 2938 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011 2939 // Op=x, Cmode=101x 2940 Imm = SplatBits >> 8; 2941 OpCmode = 0xa; 2942 break; 2943 } 2944 // can't handle any other 2945 return false; 2946 } 2947 2948 case 32: { 2949 // First the LSL variants (MSL is unusable by some interested instructions). 2950 2951 // Neon move instr per word, shift zeros 2952 VT = is128Bits ? 
MVT::v4i32 : MVT::v2i32;
2953 if ((SplatBits & ~0xff) == 0) {
2954 // Value = 0x000000nn is 0x000000nn LSL 0
2955 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
2956 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
2957 // Op=x, Cmode=000x
2958 Imm = SplatBits;
2959 OpCmode = 0;
2960 break;
2961 }
2962 if ((SplatBits & ~0xff00) == 0) {
2963 // Value = 0x0000nn00 is 0x000000nn LSL 8
2964 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
2965 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
2966 // Op=x, Cmode=001x
2967 Imm = SplatBits >> 8;
2968 OpCmode = 0x2;
2969 break;
2970 }
2971 if ((SplatBits & ~0xff0000) == 0) {
2972 // Value = 0x00nn0000 is 0x000000nn LSL 16
2973 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
2974 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
2975 // Op=x, Cmode=010x
2976 Imm = SplatBits >> 16;
2977 OpCmode = 0x4;
2978 break;
2979 }
2980 if ((SplatBits & ~0xff000000) == 0) {
2981 // Value = 0xnn000000 is 0x000000nn LSL 24
2982 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
2983 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
2984 // Op=x, Cmode=011x
2985 Imm = SplatBits >> 24;
2986 OpCmode = 0x6;
2987 break;
2988 }
2989
2990 // Now the MSL immediates.
2991
2992 // Neon move instr per word, shift ones
2993 if ((SplatBits & ~0xffff) == 0 &&
2994 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
2995 // Value = 0x0000nnff is 0x000000nn MSL 8
2996 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
2997 // Op=x, Cmode=1100
2998 Imm = SplatBits >> 8;
2999 OpCmode = 0xc;
3000 break;
3001 }
3002 if ((SplatBits & ~0xffffff) == 0 &&
3003 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3004 // Value = 0x00nnffff is 0x000000nn MSL 16
3005 // movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
3006 // Op=x, Cmode=1101
3007 Imm = SplatBits >> 16;
3008 OpCmode = 0xd;
3009 break;
3010 }
3011 // can't handle any other
3012 return false;
3013 }
3014
3015 case 64: {
3016 if (type != Neon_Mov_Imm)
3017 return false;
3018 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
3019 // movi Op=1, Cmode=1110.
3020 OpCmode = 0x1e;
3021 uint64_t BitMask = 0xff;
3022 uint64_t Val = 0;
3023 unsigned ImmMask = 1;
3024 Imm = 0;
3025 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3026 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3027 Val |= BitMask;
3028 Imm |= ImmMask;
3029 } else if ((SplatBits & BitMask) != 0) {
3030 return false;
3031 }
3032 BitMask <<= 8;
3033 ImmMask <<= 1;
3034 }
3035 SplatBits = Val;
3036 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3037 break;
3038 }
3039 }
3040
3041 return true;
3042}
3043
3044static SDValue PerformANDCombine(SDNode *N,
3045 TargetLowering::DAGCombinerInfo &DCI) {
3046
3047 SelectionDAG &DAG = DCI.DAG;
3048 SDLoc DL(N);
3049 EVT VT = N->getValueType(0);
3050
3051 // We're looking for an AND/SRL pair which forms a UBFX.
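  // For example, with i32 operands (and (srl x, 3), 0x1f) extracts a 5-bit
  // field starting at bit 3: Width == 5, LSB == 3, and the UBFX node built at
  // the end of this function gets the operands 3 and LSB + Width - 1 == 7.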
3052 3053 if (VT != MVT::i32 && VT != MVT::i64) 3054 return SDValue(); 3055 3056 if (!isa<ConstantSDNode>(N->getOperand(1))) 3057 return SDValue(); 3058 3059 uint64_t TruncMask = N->getConstantOperandVal(1); 3060 if (!isMask_64(TruncMask)) 3061 return SDValue(); 3062 3063 uint64_t Width = CountPopulation_64(TruncMask); 3064 SDValue Shift = N->getOperand(0); 3065 3066 if (Shift.getOpcode() != ISD::SRL) 3067 return SDValue(); 3068 3069 if (!isa<ConstantSDNode>(Shift->getOperand(1))) 3070 return SDValue(); 3071 uint64_t LSB = Shift->getConstantOperandVal(1); 3072 3073 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) 3074 return SDValue(); 3075 3076 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0), 3077 DAG.getConstant(LSB, MVT::i64), 3078 DAG.getConstant(LSB + Width - 1, MVT::i64)); 3079} 3080 3081/// For a true bitfield insert, the bits getting into that contiguous mask 3082/// should come from the low part of an existing value: they must be formed from 3083/// a compatible SHL operation (unless they're already low). This function 3084/// checks that condition and returns the least-significant bit that's 3085/// intended. If the operation is not a field preparation, -1 is returned. 3086static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT, 3087 SDValue &MaskedVal, uint64_t Mask) { 3088 if (!isShiftedMask_64(Mask)) 3089 return -1; 3090 3091 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI 3092 // instruction. BFI will do a left-shift by LSB before applying the mask we've 3093 // spotted, so in general we should pre-emptively "undo" that by making sure 3094 // the incoming bits have had a right-shift applied to them. 3095 // 3096 // This right shift, however, will combine with existing left/right shifts. In 3097 // the simplest case of a completely straight bitfield operation, it will be 3098 // expected to completely cancel out with an existing SHL. More complicated 3099 // cases (e.g. bitfield to bitfield copy) may still need a real shift before 3100 // the BFI. 3101 3102 uint64_t LSB = countTrailingZeros(Mask); 3103 int64_t ShiftRightRequired = LSB; 3104 if (MaskedVal.getOpcode() == ISD::SHL && 3105 isa<ConstantSDNode>(MaskedVal.getOperand(1))) { 3106 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1); 3107 MaskedVal = MaskedVal.getOperand(0); 3108 } else if (MaskedVal.getOpcode() == ISD::SRL && 3109 isa<ConstantSDNode>(MaskedVal.getOperand(1))) { 3110 ShiftRightRequired += MaskedVal.getConstantOperandVal(1); 3111 MaskedVal = MaskedVal.getOperand(0); 3112 } 3113 3114 if (ShiftRightRequired > 0) 3115 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal, 3116 DAG.getConstant(ShiftRightRequired, MVT::i64)); 3117 else if (ShiftRightRequired < 0) { 3118 // We could actually end up with a residual left shift, for example with 3119 // "struc.bitfield = val << 1". 3120 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal, 3121 DAG.getConstant(-ShiftRightRequired, MVT::i64)); 3122 } 3123 3124 return LSB; 3125} 3126 3127/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by 3128/// a mask and an extension. Returns true if a BFI was found and provides 3129/// information on its surroundings.
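/// As a rough illustration (constants chosen only for exposition): given
/// N = (zero_extend (and (AArch64ISD::BFI a, b, lsb, width), 0xff)), this
/// returns true with BFI set to that BFI node, Mask set to 0xff and Extended
/// set to true.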
3130static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask, 3131 bool &Extended) { 3132 Extended = false; 3133 if (N.getOpcode() == ISD::ZERO_EXTEND) { 3134 Extended = true; 3135 N = N.getOperand(0); 3136 } 3137 3138 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) { 3139 Mask = N->getConstantOperandVal(1); 3140 N = N.getOperand(0); 3141 } else { 3142 // Mask is the whole width. 3143 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits()); 3144 } 3145 3146 if (N.getOpcode() == AArch64ISD::BFI) { 3147 BFI = N; 3148 return true; 3149 } 3150 3151 return false; 3152} 3153 3154/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which 3155/// is roughly equivalent to (and (BFI ...), mask). This form is used because it 3156/// can often be further combined with a larger mask. Ultimately, we want mask 3157/// to be 2^32-1 or 2^64-1 so the AND can be skipped. 3158static SDValue tryCombineToBFI(SDNode *N, 3159 TargetLowering::DAGCombinerInfo &DCI, 3160 const AArch64Subtarget *Subtarget) { 3161 SelectionDAG &DAG = DCI.DAG; 3162 SDLoc DL(N); 3163 EVT VT = N->getValueType(0); 3164 3165 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 3166 3167 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or 3168 // abandon the effort. 3169 SDValue LHS = N->getOperand(0); 3170 if (LHS.getOpcode() != ISD::AND) 3171 return SDValue(); 3172 3173 uint64_t LHSMask; 3174 if (isa<ConstantSDNode>(LHS.getOperand(1))) 3175 LHSMask = LHS->getConstantOperandVal(1); 3176 else 3177 return SDValue(); 3178 3179 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask 3180 // is or abandon the effort. 3181 SDValue RHS = N->getOperand(1); 3182 if (RHS.getOpcode() != ISD::AND) 3183 return SDValue(); 3184 3185 uint64_t RHSMask; 3186 if (isa<ConstantSDNode>(RHS.getOperand(1))) 3187 RHSMask = RHS->getConstantOperandVal(1); 3188 else 3189 return SDValue(); 3190 3191 // Can't do anything if the masks are incompatible. 3192 if (LHSMask & RHSMask) 3193 return SDValue(); 3194 3195 // Now we need one of the masks to be a contiguous field. Without loss of 3196 // generality that should be the RHS one. 3197 SDValue Bitfield = LHS.getOperand(0); 3198 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) { 3199 // We know that LHS is a candidate new value, and RHS isn't already a better 3200 // one. 3201 std::swap(LHS, RHS); 3202 std::swap(LHSMask, RHSMask); 3203 } 3204 3205 // We've done our best to put the right operands in the right places, all we 3206 // can do now is check whether a BFI exists. 3207 Bitfield = RHS.getOperand(0); 3208 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask); 3209 if (LSB == -1) 3210 return SDValue(); 3211 3212 uint32_t Width = CountPopulation_64(RHSMask); 3213 assert(Width && "Expected non-zero bitfield width"); 3214 3215 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, 3216 LHS.getOperand(0), Bitfield, 3217 DAG.getConstant(LSB, MVT::i64), 3218 DAG.getConstant(Width, MVT::i64)); 3219 3220 // Mask is trivial 3221 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits()))) 3222 return BFI; 3223 3224 return DAG.getNode(ISD::AND, DL, VT, BFI, 3225 DAG.getConstant(LHSMask | RHSMask, VT)); 3226} 3227 3228/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its 3229/// original input. This is surprisingly common because SROA splits things up 3230/// into i8 chunks, so the originally detected MaskedBFI may actually only act 3231/// on the low (say) byte of a word. 
This is then orred into the rest of the 3232/// word afterwards. 3233/// 3234/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)). 3235/// 3236/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the 3237/// MaskedBFI. We can also deal with a certain amount of extend/truncate being 3238/// involved. 3239static SDValue tryCombineToLargerBFI(SDNode *N, 3240 TargetLowering::DAGCombinerInfo &DCI, 3241 const AArch64Subtarget *Subtarget) { 3242 SelectionDAG &DAG = DCI.DAG; 3243 SDLoc DL(N); 3244 EVT VT = N->getValueType(0); 3245 3246 // First job is to hunt for a MaskedBFI on either the left or right. Swap 3247 // operands if it's actually on the right. 3248 SDValue BFI; 3249 SDValue PossExtraMask; 3250 uint64_t ExistingMask = 0; 3251 bool Extended = false; 3252 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended)) 3253 PossExtraMask = N->getOperand(1); 3254 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended)) 3255 PossExtraMask = N->getOperand(0); 3256 else 3257 return SDValue(); 3258 3259 // We can only combine a BFI with another compatible mask. 3260 if (PossExtraMask.getOpcode() != ISD::AND || 3261 !isa<ConstantSDNode>(PossExtraMask.getOperand(1))) 3262 return SDValue(); 3263 3264 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1); 3265 3266 // Masks must be compatible. 3267 if (ExtraMask & ExistingMask) 3268 return SDValue(); 3269 3270 SDValue OldBFIVal = BFI.getOperand(0); 3271 SDValue NewBFIVal = BFI.getOperand(1); 3272 if (Extended) { 3273 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be 3274 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments 3275 // need to be made compatible. 3276 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32 3277 && "Invalid types for BFI"); 3278 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal); 3279 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal); 3280 } 3281 3282 // We need the MaskedBFI to be combined with a mask of the *same* value. 3283 if (PossExtraMask.getOperand(0) != OldBFIVal) 3284 return SDValue(); 3285 3286 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, 3287 OldBFIVal, NewBFIVal, 3288 BFI.getOperand(2), BFI.getOperand(3)); 3289 3290 // If the masking is trivial, we don't need to create it. 3291 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits()))) 3292 return BFI; 3293 3294 return DAG.getNode(ISD::AND, DL, VT, BFI, 3295 DAG.getConstant(ExtraMask | ExistingMask, VT)); 3296} 3297 3298/// An EXTR instruction is made up of two shifts, ORed together. This helper 3299/// searches for and classifies those shifts. 3300static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, 3301 bool &FromHi) { 3302 if (N.getOpcode() == ISD::SHL) 3303 FromHi = false; 3304 else if (N.getOpcode() == ISD::SRL) 3305 FromHi = true; 3306 else 3307 return false; 3308 3309 if (!isa<ConstantSDNode>(N.getOperand(1))) 3310 return false; 3311 3312 ShiftAmount = N->getConstantOperandVal(1); 3313 Src = N->getOperand(0); 3314 return true; 3315} 3316 3317/// EXTR instruction extracts a contiguous chunk of bits from two existing 3318/// registers viewed as a high/low pair. This function looks for the pattern: 3319/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an 3320/// EXTR. Can't quite be done in TableGen because the two immediates aren't 3321/// independent. 
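/// As an illustrative example on i64 (shift amounts chosen arbitrarily):
/// (or (shl x, #16), (srl y, #48)) computes (x << 16) | (y >> 48), i.e. a
/// 64-bit window taken from the concatenation x:y, and is rewritten here as
/// AArch64ISD::EXTR x, y, #48.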
3322static SDValue tryCombineToEXTR(SDNode *N, 3323 TargetLowering::DAGCombinerInfo &DCI) { 3324 SelectionDAG &DAG = DCI.DAG; 3325 SDLoc DL(N); 3326 EVT VT = N->getValueType(0); 3327 3328 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 3329 3330 if (VT != MVT::i32 && VT != MVT::i64) 3331 return SDValue(); 3332 3333 SDValue LHS; 3334 uint32_t ShiftLHS = 0; 3335 bool LHSFromHi = 0; 3336 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi)) 3337 return SDValue(); 3338 3339 SDValue RHS; 3340 uint32_t ShiftRHS = 0; 3341 bool RHSFromHi = 0; 3342 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi)) 3343 return SDValue(); 3344 3345 // If they're both trying to come from the high part of the register, they're 3346 // not really an EXTR. 3347 if (LHSFromHi == RHSFromHi) 3348 return SDValue(); 3349 3350 if (ShiftLHS + ShiftRHS != VT.getSizeInBits()) 3351 return SDValue(); 3352 3353 if (LHSFromHi) { 3354 std::swap(LHS, RHS); 3355 std::swap(ShiftLHS, ShiftRHS); 3356 } 3357 3358 return DAG.getNode(AArch64ISD::EXTR, DL, VT, 3359 LHS, RHS, 3360 DAG.getConstant(ShiftRHS, MVT::i64)); 3361} 3362 3363/// Target-specific dag combine xforms for ISD::OR 3364static SDValue PerformORCombine(SDNode *N, 3365 TargetLowering::DAGCombinerInfo &DCI, 3366 const AArch64Subtarget *Subtarget) { 3367 3368 SelectionDAG &DAG = DCI.DAG; 3369 SDLoc DL(N); 3370 EVT VT = N->getValueType(0); 3371 3372 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 3373 return SDValue(); 3374 3375 // Attempt to recognise bitfield-insert operations. 3376 SDValue Res = tryCombineToBFI(N, DCI, Subtarget); 3377 if (Res.getNode()) 3378 return Res; 3379 3380 // Attempt to combine an existing MaskedBFI operation into one with a larger 3381 // mask. 3382 Res = tryCombineToLargerBFI(N, DCI, Subtarget); 3383 if (Res.getNode()) 3384 return Res; 3385 3386 Res = tryCombineToEXTR(N, DCI); 3387 if (Res.getNode()) 3388 return Res; 3389 3390 if (!Subtarget->hasNEON()) 3391 return SDValue(); 3392 3393 // Attempt to use vector immediate-form BSL 3394 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 3395 3396 SDValue N0 = N->getOperand(0); 3397 if (N0.getOpcode() != ISD::AND) 3398 return SDValue(); 3399 3400 SDValue N1 = N->getOperand(1); 3401 if (N1.getOpcode() != ISD::AND) 3402 return SDValue(); 3403 3404 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 3405 APInt SplatUndef; 3406 unsigned SplatBitSize; 3407 bool HasAnyUndefs; 3408 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 3409 APInt SplatBits0; 3410 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 3411 HasAnyUndefs) && 3412 !HasAnyUndefs) { 3413 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 3414 APInt SplatBits1; 3415 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 3416 HasAnyUndefs) && 3417 !HasAnyUndefs && SplatBits0 == ~SplatBits1) { 3418 // Canonicalize the vector type to make instruction selection simpler. 3419 EVT CanonicalVT = VT.is128BitVector() ? 
MVT::v16i8 : MVT::v8i8; 3420 SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT, 3421 N0->getOperand(1), N0->getOperand(0), 3422 N1->getOperand(0)); 3423 return DAG.getNode(ISD::BITCAST, DL, VT, Result); 3424 } 3425 } 3426 } 3427 3428 return SDValue(); 3429} 3430 3431/// Target-specific dag combine xforms for ISD::SRA 3432static SDValue PerformSRACombine(SDNode *N, 3433 TargetLowering::DAGCombinerInfo &DCI) { 3434 3435 SelectionDAG &DAG = DCI.DAG; 3436 SDLoc DL(N); 3437 EVT VT = N->getValueType(0); 3438 3439 // We're looking for an SRA/SHL pair which form an SBFX. 3440 3441 if (VT != MVT::i32 && VT != MVT::i64) 3442 return SDValue(); 3443 3444 if (!isa<ConstantSDNode>(N->getOperand(1))) 3445 return SDValue(); 3446 3447 uint64_t ExtraSignBits = N->getConstantOperandVal(1); 3448 SDValue Shift = N->getOperand(0); 3449 3450 if (Shift.getOpcode() != ISD::SHL) 3451 return SDValue(); 3452 3453 if (!isa<ConstantSDNode>(Shift->getOperand(1))) 3454 return SDValue(); 3455 3456 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1); 3457 uint64_t Width = VT.getSizeInBits() - ExtraSignBits; 3458 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft; 3459 3460 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) 3461 return SDValue(); 3462 3463 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0), 3464 DAG.getConstant(LSB, MVT::i64), 3465 DAG.getConstant(LSB + Width - 1, MVT::i64)); 3466} 3467 3468/// Check if this is a valid build_vector for the immediate operand of 3469/// a vector shift operation, where all the elements of the build_vector 3470/// must have the same constant integer value. 3471static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 3472 // Ignore bit_converts. 3473 while (Op.getOpcode() == ISD::BITCAST) 3474 Op = Op.getOperand(0); 3475 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3476 APInt SplatBits, SplatUndef; 3477 unsigned SplatBitSize; 3478 bool HasAnyUndefs; 3479 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 3480 HasAnyUndefs, ElementBits) || 3481 SplatBitSize > ElementBits) 3482 return false; 3483 Cnt = SplatBits.getSExtValue(); 3484 return true; 3485} 3486 3487/// Check if this is a valid build_vector for the immediate operand of 3488/// a vector shift left operation. That value must be in the range: 3489/// 0 <= Value < ElementBits 3490static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) { 3491 assert(VT.isVector() && "vector shift count is not a vector type"); 3492 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3493 if (!getVShiftImm(Op, ElementBits, Cnt)) 3494 return false; 3495 return (Cnt >= 0 && Cnt < ElementBits); 3496} 3497 3498/// Check if this is a valid build_vector for the immediate operand of a 3499/// vector shift right operation. The value must be in the range: 3500/// 1 <= Value <= ElementBits 3501static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) { 3502 assert(VT.isVector() && "vector shift count is not a vector type"); 3503 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3504 if (!getVShiftImm(Op, ElementBits, Cnt)) 3505 return false; 3506 return (Cnt >= 1 && Cnt <= ElementBits); 3507} 3508 3509/// Checks for immediate versions of vector shifts and lowers them. 
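/// For instance (purely illustrative), a v4i32 SHL whose shift amount is the
/// splat (build_vector 3, 3, 3, 3) has that amount rewritten as a NEON_VDUP of
/// the constant 3, so selection can then match the immediate-shift form.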
3510static SDValue PerformShiftCombine(SDNode *N, 3511 TargetLowering::DAGCombinerInfo &DCI, 3512 const AArch64Subtarget *ST) { 3513 SelectionDAG &DAG = DCI.DAG; 3514 EVT VT = N->getValueType(0); 3515 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64)) 3516 return PerformSRACombine(N, DCI); 3517 3518 // Nothing to be done for scalar shifts. 3519 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3520 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 3521 return SDValue(); 3522 3523 assert(ST->hasNEON() && "unexpected vector shift"); 3524 int64_t Cnt; 3525 3526 switch (N->getOpcode()) { 3527 default: 3528 llvm_unreachable("unexpected shift opcode"); 3529 3530 case ISD::SHL: 3531 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) { 3532 SDValue RHS = 3533 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, 3534 DAG.getConstant(Cnt, MVT::i32)); 3535 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS); 3536 } 3537 break; 3538 3539 case ISD::SRA: 3540 case ISD::SRL: 3541 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) { 3542 SDValue RHS = 3543 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, 3544 DAG.getConstant(Cnt, MVT::i32)); 3545 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS); 3546 } 3547 break; 3548 } 3549 3550 return SDValue(); 3551} 3552 3553/// ARM-specific DAG combining for intrinsics. 3554static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 3555 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3556 3557 switch (IntNo) { 3558 default: 3559 // Don't do anything for most intrinsics. 3560 break; 3561 3562 case Intrinsic::arm_neon_vqshifts: 3563 case Intrinsic::arm_neon_vqshiftu: 3564 EVT VT = N->getOperand(1).getValueType(); 3565 int64_t Cnt; 3566 if (!isVShiftLImm(N->getOperand(2), VT, Cnt)) 3567 break; 3568 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts) 3569 ? AArch64ISD::NEON_QSHLs 3570 : AArch64ISD::NEON_QSHLu; 3571 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), 3572 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 3573 } 3574 3575 return SDValue(); 3576} 3577 3578/// Target-specific DAG combine function for NEON load/store intrinsics 3579/// to merge base address updates. 3580static SDValue CombineBaseUpdate(SDNode *N, 3581 TargetLowering::DAGCombinerInfo &DCI) { 3582 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 3583 return SDValue(); 3584 3585 SelectionDAG &DAG = DCI.DAG; 3586 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 3587 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 3588 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 3589 SDValue Addr = N->getOperand(AddrOpIdx); 3590 3591 // Search for a use of the address operand that is an increment. 3592 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 3593 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 3594 SDNode *User = *UI; 3595 if (User->getOpcode() != ISD::ADD || 3596 UI.getUse().getResNo() != Addr.getResNo()) 3597 continue; 3598 3599 // Check that the add is independent of the load/store. Otherwise, folding 3600 // it would create a cycle. 3601 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 3602 continue; 3603 3604 // Find the new opcode for the updating load/store. 
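    // As a rough example: for an arm_neon_vld2 of two v4i32 vectors whose
    // address is also used by (add addr, #32), the constant increment matches
    // the 32 bytes loaded, so the node can be rewritten as NEON_LD2_UPD, a
    // post-indexed form that additionally produces the updated address.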
3605 bool isLoad = true; 3606 bool isLaneOp = false; 3607 unsigned NewOpc = 0; 3608 unsigned NumVecs = 0; 3609 if (isIntrinsic) { 3610 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 3611 switch (IntNo) { 3612 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 3613 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD; 3614 NumVecs = 1; break; 3615 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD; 3616 NumVecs = 2; break; 3617 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD; 3618 NumVecs = 3; break; 3619 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD; 3620 NumVecs = 4; break; 3621 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD; 3622 NumVecs = 1; isLoad = false; break; 3623 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD; 3624 NumVecs = 2; isLoad = false; break; 3625 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD; 3626 NumVecs = 3; isLoad = false; break; 3627 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD; 3628 NumVecs = 4; isLoad = false; break; 3629 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD; 3630 NumVecs = 2; break; 3631 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD; 3632 NumVecs = 3; break; 3633 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD; 3634 NumVecs = 4; break; 3635 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD; 3636 NumVecs = 2; isLoad = false; break; 3637 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD; 3638 NumVecs = 3; isLoad = false; break; 3639 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD; 3640 NumVecs = 4; isLoad = false; break; 3641 case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD; 3642 NumVecs = 2; isLaneOp = true; break; 3643 case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD; 3644 NumVecs = 3; isLaneOp = true; break; 3645 case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD; 3646 NumVecs = 4; isLaneOp = true; break; 3647 case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD; 3648 NumVecs = 2; isLoad = false; isLaneOp = true; break; 3649 case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD; 3650 NumVecs = 3; isLoad = false; isLaneOp = true; break; 3651 case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD; 3652 NumVecs = 4; isLoad = false; isLaneOp = true; break; 3653 } 3654 } else { 3655 isLaneOp = true; 3656 switch (N->getOpcode()) { 3657 default: llvm_unreachable("unexpected opcode for Neon base update"); 3658 case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD; 3659 NumVecs = 2; break; 3660 case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD; 3661 NumVecs = 3; break; 3662 case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD; 3663 NumVecs = 4; break; 3664 } 3665 } 3666 3667 // Find the size of memory referenced by the load/store. 3668 EVT VecTy; 3669 if (isLoad) 3670 VecTy = N->getValueType(0); 3671 else 3672 VecTy = N->getOperand(AddrOpIdx + 1).getValueType(); 3673 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 3674 if (isLaneOp) 3675 NumBytes /= VecTy.getVectorNumElements(); 3676 3677 // If the increment is a constant, it must match the memory ref size. 3678 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 
1 : 0); 3679 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 3680 uint32_t IncVal = CInc->getZExtValue(); 3681 if (IncVal != NumBytes) 3682 continue; 3683 Inc = DAG.getTargetConstant(IncVal, MVT::i32); 3684 } 3685 3686 // Create the new updating load/store node. 3687 EVT Tys[6]; 3688 unsigned NumResultVecs = (isLoad ? NumVecs : 0); 3689 unsigned n; 3690 for (n = 0; n < NumResultVecs; ++n) 3691 Tys[n] = VecTy; 3692 Tys[n++] = MVT::i64; 3693 Tys[n] = MVT::Other; 3694 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2); 3695 SmallVector<SDValue, 8> Ops; 3696 Ops.push_back(N->getOperand(0)); // incoming chain 3697 Ops.push_back(N->getOperand(AddrOpIdx)); 3698 Ops.push_back(Inc); 3699 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 3700 Ops.push_back(N->getOperand(i)); 3701 } 3702 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 3703 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, 3704 Ops.data(), Ops.size(), 3705 MemInt->getMemoryVT(), 3706 MemInt->getMemOperand()); 3707 3708 // Update the uses. 3709 std::vector<SDValue> NewResults; 3710 for (unsigned i = 0; i < NumResultVecs; ++i) { 3711 NewResults.push_back(SDValue(UpdN.getNode(), i)); 3712 } 3713 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain 3714 DCI.CombineTo(N, NewResults); 3715 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 3716 3717 break; 3718 } 3719 return SDValue(); 3720} 3721 3722/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) 3723/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs. 3724/// If so, combine them to a vldN-dup operation and return true. 3725static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 3726 SelectionDAG &DAG = DCI.DAG; 3727 EVT VT = N->getValueType(0); 3728 3729 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 3730 SDNode *VLD = N->getOperand(0).getNode(); 3731 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 3732 return SDValue(); 3733 unsigned NumVecs = 0; 3734 unsigned NewOpc = 0; 3735 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 3736 if (IntNo == Intrinsic::arm_neon_vld2lane) { 3737 NumVecs = 2; 3738 NewOpc = AArch64ISD::NEON_LD2DUP; 3739 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 3740 NumVecs = 3; 3741 NewOpc = AArch64ISD::NEON_LD3DUP; 3742 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 3743 NumVecs = 4; 3744 NewOpc = AArch64ISD::NEON_LD4DUP; 3745 } else { 3746 return SDValue(); 3747 } 3748 3749 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 3750 // numbers match the load. 3751 unsigned VLDLaneNo = 3752 cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue(); 3753 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 3754 UI != UE; ++UI) { 3755 // Ignore uses of the chain result. 3756 if (UI.getUse().getResNo() == NumVecs) 3757 continue; 3758 SDNode *User = *UI; 3759 if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE || 3760 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 3761 return SDValue(); 3762 } 3763 3764 // Create the vldN-dup node. 
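  // Illustratively, for arm_neon_vld3lane this builds a NEON_LD3DUP with three
  // vector results plus a chain: one element is loaded per register and then
  // broadcast to every lane, replacing the separate load-lane plus
  // NEON_VDUPLANE pairs matched above.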
3765 EVT Tys[5]; 3766 unsigned n; 3767 for (n = 0; n < NumVecs; ++n) 3768 Tys[n] = VT; 3769 Tys[n] = MVT::Other; 3770 SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1); 3771 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 3772 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 3773 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2, 3774 VLDMemInt->getMemoryVT(), 3775 VLDMemInt->getMemOperand()); 3776 3777 // Update the uses. 3778 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 3779 UI != UE; ++UI) { 3780 unsigned ResNo = UI.getUse().getResNo(); 3781 // Ignore uses of the chain result. 3782 if (ResNo == NumVecs) 3783 continue; 3784 SDNode *User = *UI; 3785 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 3786 } 3787 3788 // Now the vldN-lane intrinsic is dead except for its chain result. 3789 // Update uses of the chain. 3790 std::vector<SDValue> VLDDupResults; 3791 for (unsigned n = 0; n < NumVecs; ++n) 3792 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 3793 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 3794 DCI.CombineTo(VLD, VLDDupResults); 3795 3796 return SDValue(N, 0); 3797} 3798 3799SDValue 3800AArch64TargetLowering::PerformDAGCombine(SDNode *N, 3801 DAGCombinerInfo &DCI) const { 3802 switch (N->getOpcode()) { 3803 default: break; 3804 case ISD::AND: return PerformANDCombine(N, DCI); 3805 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget()); 3806 case ISD::SHL: 3807 case ISD::SRA: 3808 case ISD::SRL: 3809 return PerformShiftCombine(N, DCI, getSubtarget()); 3810 case ISD::INTRINSIC_WO_CHAIN: 3811 return PerformIntrinsicCombine(N, DCI.DAG); 3812 case AArch64ISD::NEON_VDUPLANE: 3813 return CombineVLDDUP(N, DCI); 3814 case AArch64ISD::NEON_LD2DUP: 3815 case AArch64ISD::NEON_LD3DUP: 3816 case AArch64ISD::NEON_LD4DUP: 3817 return CombineBaseUpdate(N, DCI); 3818 case ISD::INTRINSIC_VOID: 3819 case ISD::INTRINSIC_W_CHAIN: 3820 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 3821 case Intrinsic::arm_neon_vld1: 3822 case Intrinsic::arm_neon_vld2: 3823 case Intrinsic::arm_neon_vld3: 3824 case Intrinsic::arm_neon_vld4: 3825 case Intrinsic::arm_neon_vst1: 3826 case Intrinsic::arm_neon_vst2: 3827 case Intrinsic::arm_neon_vst3: 3828 case Intrinsic::arm_neon_vst4: 3829 case Intrinsic::arm_neon_vld2lane: 3830 case Intrinsic::arm_neon_vld3lane: 3831 case Intrinsic::arm_neon_vld4lane: 3832 case Intrinsic::aarch64_neon_vld1x2: 3833 case Intrinsic::aarch64_neon_vld1x3: 3834 case Intrinsic::aarch64_neon_vld1x4: 3835 case Intrinsic::aarch64_neon_vst1x2: 3836 case Intrinsic::aarch64_neon_vst1x3: 3837 case Intrinsic::aarch64_neon_vst1x4: 3838 case Intrinsic::arm_neon_vst2lane: 3839 case Intrinsic::arm_neon_vst3lane: 3840 case Intrinsic::arm_neon_vst4lane: 3841 return CombineBaseUpdate(N, DCI); 3842 default: 3843 break; 3844 } 3845 } 3846 return SDValue(); 3847} 3848 3849bool 3850AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3851 VT = VT.getScalarType(); 3852 3853 if (!VT.isSimple()) 3854 return false; 3855 3856 switch (VT.getSimpleVT().SimpleTy) { 3857 case MVT::f16: 3858 case MVT::f32: 3859 case MVT::f64: 3860 return true; 3861 case MVT::f128: 3862 return false; 3863 default: 3864 break; 3865 } 3866 3867 return false; 3868} 3869 3870// Check whether a Build Vector could be presented as Shuffle Vector. If yes, 3871// try to call LowerVECTOR_SHUFFLE to lower it. 
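// As an illustrative example: a v4i32 build_vector whose operands are
// extract_vector_elt of V0 lanes 0 and 1 followed by V1 lanes 2 and 3 (with V0
// and V1 both v4i32) is equivalent to a vector_shuffle of V0 and V1 with mask
// <0, 1, 6, 7>, which LowerVECTOR_SHUFFLE can then lower.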
3872bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, 3873 SDValue &Res) const { 3874 SDLoc DL(Op); 3875 EVT VT = Op.getValueType(); 3876 unsigned NumElts = VT.getVectorNumElements(); 3877 unsigned V0NumElts = 0; 3878 int Mask[16]; 3879 SDValue V0, V1; 3880 3881 // Check if all elements are extracted from less than 3 vectors. 3882 for (unsigned i = 0; i < NumElts; ++i) { 3883 SDValue Elt = Op.getOperand(i); 3884 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 3885 return false; 3886 3887 if (V0.getNode() == 0) { 3888 V0 = Elt.getOperand(0); 3889 V0NumElts = V0.getValueType().getVectorNumElements(); 3890 } 3891 if (Elt.getOperand(0) == V0) { 3892 Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue()); 3893 continue; 3894 } else if (V1.getNode() == 0) { 3895 V1 = Elt.getOperand(0); 3896 } 3897 if (Elt.getOperand(0) == V1) { 3898 unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue(); 3899 Mask[i] = (Lane + V0NumElts); 3900 continue; 3901 } else { 3902 return false; 3903 } 3904 } 3905 3906 if (!V1.getNode() && V0NumElts == NumElts * 2) { 3907 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, 3908 DAG.getConstant(NumElts, MVT::i64)); 3909 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, 3910 DAG.getConstant(0, MVT::i64)); 3911 V0NumElts = V0.getValueType().getVectorNumElements(); 3912 } 3913 3914 if (V1.getNode() && NumElts == V0NumElts && 3915 V0NumElts == V1.getValueType().getVectorNumElements()) { 3916 SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask); 3917 Res = LowerVECTOR_SHUFFLE(Shuffle, DAG); 3918 return true; 3919 } else 3920 return false; 3921} 3922 3923// If this is a case we can't handle, return null and let the default 3924// expansion code take care of it. 3925SDValue 3926AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3927 const AArch64Subtarget *ST) const { 3928 3929 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3930 SDLoc DL(Op); 3931 EVT VT = Op.getValueType(); 3932 3933 APInt SplatBits, SplatUndef; 3934 unsigned SplatBitSize; 3935 bool HasAnyUndefs; 3936 3937 unsigned UseNeonMov = VT.getSizeInBits() >= 64; 3938 3939 // Note we favor lowering MOVI over MVNI. 3940 // This has implications on the definition of patterns in TableGen to select 3941 // BIC immediate instructions but not ORR immediate instructions. 3942 // If this lowering order is changed, TableGen patterns for BIC immediate and 3943 // ORR immediate instructions have to be updated. 
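  // For illustration only: a v4i32 splat of 0x000000ff can be reached directly
  // with MOVI (immediate 0xff, LSL #0), whereas a splat of 0xffffff00, its
  // bitwise NOT, is instead reached through the MVNI path tried second.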
3944 if (UseNeonMov && 3945 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3946 if (SplatBitSize <= 64) { 3947 // First attempt to use vector immediate-form MOVI 3948 EVT NeonMovVT; 3949 unsigned Imm = 0; 3950 unsigned OpCmode = 0; 3951 3952 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), 3953 SplatBitSize, DAG, VT.is128BitVector(), 3954 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) { 3955 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3956 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3957 3958 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3959 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT, 3960 ImmVal, OpCmodeVal); 3961 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3962 } 3963 } 3964 3965 // Then attempt to use vector immediate-form MVNI 3966 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3967 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, 3968 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT, 3969 Imm, OpCmode)) { 3970 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3971 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3972 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3973 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT, 3974 ImmVal, OpCmodeVal); 3975 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3976 } 3977 } 3978 3979 // Attempt to use vector immediate-form FMOV 3980 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) || 3981 (VT == MVT::v2f64 && SplatBitSize == 64)) { 3982 APFloat RealVal( 3983 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble, 3984 SplatBits); 3985 uint32_t ImmVal; 3986 if (A64Imms::isFPImm(RealVal, ImmVal)) { 3987 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 3988 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val); 3989 } 3990 } 3991 } 3992 } 3993 3994 unsigned NumElts = VT.getVectorNumElements(); 3995 bool isOnlyLowElement = true; 3996 bool usesOnlyOneValue = true; 3997 bool hasDominantValue = false; 3998 bool isConstant = true; 3999 4000 // Map of the number of times a particular SDValue appears in the 4001 // element list. 4002 DenseMap<SDValue, unsigned> ValueCounts; 4003 SDValue Value; 4004 for (unsigned i = 0; i < NumElts; ++i) { 4005 SDValue V = Op.getOperand(i); 4006 if (V.getOpcode() == ISD::UNDEF) 4007 continue; 4008 if (i > 0) 4009 isOnlyLowElement = false; 4010 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4011 isConstant = false; 4012 4013 ValueCounts.insert(std::make_pair(V, 0)); 4014 unsigned &Count = ValueCounts[V]; 4015 4016 // Is this value dominant? (takes up more than half of the lanes) 4017 if (++Count > (NumElts / 2)) { 4018 hasDominantValue = true; 4019 Value = V; 4020 } 4021 } 4022 if (ValueCounts.size() != 1) 4023 usesOnlyOneValue = false; 4024 if (!Value.getNode() && ValueCounts.size() > 0) 4025 Value = ValueCounts.begin()->first; 4026 4027 if (ValueCounts.size() == 0) 4028 return DAG.getUNDEF(VT); 4029 4030 // Loads are better lowered with insert_vector_elt. 4031 // Keep going if we are hitting this case. 4032 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 4033 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 4034 4035 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4036 if (hasDominantValue && EltSize <= 64) { 4037 // Use VDUP for non-constant splats. 
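  // Rough example: for <x, x, x, x> with a run-time x already in a scalar
  // register, NEON_VDUP broadcasts it to every lane; if x itself came from an
  // extract_vector_elt at a constant lane, NEON_VDUPLANE is used instead so
  // the broadcast reads straight from that vector.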
4038 if (!isConstant) { 4039 SDValue N; 4040 4041 // If we are DUPing a value that comes directly from a vector, we could 4042 // just use DUPLANE. We can only do this if the lane being extracted 4043 // is at a constant index, as the DUP from lane instructions only have 4044 // constant-index forms. 4045 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4046 isa<ConstantSDNode>(Value->getOperand(1))) { 4047 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, 4048 Value->getOperand(0), Value->getOperand(1)); 4049 } else 4050 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 4051 4052 if (!usesOnlyOneValue) { 4053 // The dominant value was splatted as 'N', but we now have to insert 4054 // all differing elements. 4055 for (unsigned I = 0; I < NumElts; ++I) { 4056 if (Op.getOperand(I) == Value) 4057 continue; 4058 SmallVector<SDValue, 3> Ops; 4059 Ops.push_back(N); 4060 Ops.push_back(Op.getOperand(I)); 4061 Ops.push_back(DAG.getConstant(I, MVT::i64)); 4062 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3); 4063 } 4064 } 4065 return N; 4066 } 4067 if (usesOnlyOneValue && isConstant) { 4068 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 4069 } 4070 } 4071 // If all elements are constants and the case above didn't get hit, fall back 4072 // to the default expansion, which will generate a load from the constant 4073 // pool. 4074 if (isConstant) 4075 return SDValue(); 4076 4077 // Try to lower this in lowering ShuffleVector way. 4078 SDValue Shuf; 4079 if (isKnownShuffleVector(Op, DAG, Shuf)) 4080 return Shuf; 4081 4082 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 4083 // know the default expansion would otherwise fall back on something even 4084 // worse. For a vector with one or two non-undef values, that's 4085 // scalar_to_vector for the elements followed by a shuffle (provided the 4086 // shuffle is valid for the target) and materialization element by element 4087 // on the stack followed by a load for everything else. 4088 if (!isConstant && !usesOnlyOneValue) { 4089 SDValue Vec = DAG.getUNDEF(VT); 4090 for (unsigned i = 0 ; i < NumElts; ++i) { 4091 SDValue V = Op.getOperand(i); 4092 if (V.getOpcode() == ISD::UNDEF) 4093 continue; 4094 SDValue LaneIdx = DAG.getConstant(i, MVT::i64); 4095 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx); 4096 } 4097 return Vec; 4098 } 4099 return SDValue(); 4100} 4101 4102/// isREVMask - Check if a vector shuffle corresponds to a REV 4103/// instruction with the specified blocksize. (The order of the elements 4104/// within each block of the vector is reversed.) 4105static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 4106 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && 4107 "Only possible block sizes for REV are: 16, 32, 64"); 4108 4109 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4110 if (EltSz == 64) 4111 return false; 4112 4113 unsigned NumElts = VT.getVectorNumElements(); 4114 unsigned BlockElts = M[0] + 1; 4115 // If the first shuffle index is UNDEF, be optimistic. 
4116 if (M[0] < 0) 4117 BlockElts = BlockSize / EltSz; 4118 4119 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 4120 return false; 4121 4122 for (unsigned i = 0; i < NumElts; ++i) { 4123 if (M[i] < 0) 4124 continue; // ignore UNDEF indices 4125 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) 4126 return false; 4127 } 4128 4129 return true; 4130} 4131 4132// isPermuteMask - Check whether the vector shuffle matches to UZP, ZIP and 4133// TRN instruction. 4134static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) { 4135 unsigned NumElts = VT.getVectorNumElements(); 4136 if (NumElts < 4) 4137 return 0; 4138 4139 bool ismatch = true; 4140 4141 // Check UZP1 4142 for (unsigned i = 0; i < NumElts; ++i) { 4143 if ((unsigned)M[i] != i * 2) { 4144 ismatch = false; 4145 break; 4146 } 4147 } 4148 if (ismatch) 4149 return AArch64ISD::NEON_UZP1; 4150 4151 // Check UZP2 4152 ismatch = true; 4153 for (unsigned i = 0; i < NumElts; ++i) { 4154 if ((unsigned)M[i] != i * 2 + 1) { 4155 ismatch = false; 4156 break; 4157 } 4158 } 4159 if (ismatch) 4160 return AArch64ISD::NEON_UZP2; 4161 4162 // Check ZIP1 4163 ismatch = true; 4164 for (unsigned i = 0; i < NumElts; ++i) { 4165 if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) { 4166 ismatch = false; 4167 break; 4168 } 4169 } 4170 if (ismatch) 4171 return AArch64ISD::NEON_ZIP1; 4172 4173 // Check ZIP2 4174 ismatch = true; 4175 for (unsigned i = 0; i < NumElts; ++i) { 4176 if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) { 4177 ismatch = false; 4178 break; 4179 } 4180 } 4181 if (ismatch) 4182 return AArch64ISD::NEON_ZIP2; 4183 4184 // Check TRN1 4185 ismatch = true; 4186 for (unsigned i = 0; i < NumElts; ++i) { 4187 if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) { 4188 ismatch = false; 4189 break; 4190 } 4191 } 4192 if (ismatch) 4193 return AArch64ISD::NEON_TRN1; 4194 4195 // Check TRN2 4196 ismatch = true; 4197 for (unsigned i = 0; i < NumElts; ++i) { 4198 if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) { 4199 ismatch = false; 4200 break; 4201 } 4202 } 4203 if (ismatch) 4204 return AArch64ISD::NEON_TRN2; 4205 4206 return 0; 4207} 4208 4209SDValue 4210AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 4211 SelectionDAG &DAG) const { 4212 SDValue V1 = Op.getOperand(0); 4213 SDValue V2 = Op.getOperand(1); 4214 SDLoc dl(Op); 4215 EVT VT = Op.getValueType(); 4216 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4217 4218 // Convert shuffles that are directly supported on NEON to target-specific 4219 // DAG nodes, instead of keeping them as shuffles and matching them again 4220 // during code selection. This is more efficient and avoids the possibility 4221 // of inconsistencies between legalization and selection. 
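  // The checks below try, roughly in this order: REV64/REV32/REV16 masks, the
  // UZP/ZIP/TRN permutes, splat masks (NEON_VDUP or NEON_VDUPLANE), sequential
  // masks that map onto NEON_VEXTRACT, and finally element-by-element
  // insertion as a fallback.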
4222 ArrayRef<int> ShuffleMask = SVN->getMask(); 4223 4224 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4225 if (EltSize > 64) 4226 return SDValue(); 4227 4228 if (isREVMask(ShuffleMask, VT, 64)) 4229 return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1); 4230 if (isREVMask(ShuffleMask, VT, 32)) 4231 return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1); 4232 if (isREVMask(ShuffleMask, VT, 16)) 4233 return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1); 4234 4235 unsigned ISDNo = isPermuteMask(ShuffleMask, VT); 4236 if (ISDNo) 4237 return DAG.getNode(ISDNo, dl, VT, V1, V2); 4238 4239 // If the element of shuffle mask are all the same constant, we can 4240 // transform it into either NEON_VDUP or NEON_VDUPLANE 4241 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4242 int Lane = SVN->getSplatIndex(); 4243 // If this is undef splat, generate it via "just" vdup, if possible. 4244 if (Lane == -1) Lane = 0; 4245 4246 // Test if V1 is a SCALAR_TO_VECTOR. 4247 if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4248 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0)); 4249 } 4250 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR. 4251 if (V1.getOpcode() == ISD::BUILD_VECTOR) { 4252 bool IsScalarToVector = true; 4253 for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i) 4254 if (V1.getOperand(i).getOpcode() != ISD::UNDEF && 4255 i != (unsigned)Lane) { 4256 IsScalarToVector = false; 4257 break; 4258 } 4259 if (IsScalarToVector) 4260 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, 4261 V1.getOperand(Lane)); 4262 } 4263 4264 // Test if V1 is a EXTRACT_SUBVECTOR. 4265 if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) { 4266 int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue(); 4267 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0), 4268 DAG.getConstant(Lane + ExtLane, MVT::i64)); 4269 } 4270 // Test if V1 is a CONCAT_VECTORS. 4271 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 4272 V1.getOperand(1).getOpcode() == ISD::UNDEF) { 4273 SDValue Op0 = V1.getOperand(0); 4274 assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() && 4275 "Invalid vector lane access"); 4276 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0, 4277 DAG.getConstant(Lane, MVT::i64)); 4278 } 4279 4280 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1, 4281 DAG.getConstant(Lane, MVT::i64)); 4282 } 4283 4284 int Length = ShuffleMask.size(); 4285 int V1EltNum = V1.getValueType().getVectorNumElements(); 4286 4287 // If the number of v1 elements is the same as the number of shuffle mask 4288 // element and the shuffle masks are sequential values, we can transform 4289 // it into NEON_VEXTRACT. 4290 if (V1EltNum == Length) { 4291 // Check if the shuffle mask is sequential. 4292 bool IsSequential = true; 4293 int CurMask = ShuffleMask[0]; 4294 for (int I = 0; I < Length; ++I) { 4295 if (ShuffleMask[I] != CurMask) { 4296 IsSequential = false; 4297 break; 4298 } 4299 CurMask++; 4300 } 4301 if (IsSequential) { 4302 assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect"); 4303 unsigned VecSize = EltSize * V1EltNum; 4304 unsigned Index = (EltSize/8) * ShuffleMask[0]; 4305 if (VecSize == 64 || VecSize == 128) 4306 return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2, 4307 DAG.getConstant(Index, MVT::i64)); 4308 } 4309 } 4310 4311 // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert 4312 // by element from V2 to V1 . 
4313 // If shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a 4314 // better choice to be inserted than V1 as less insert needed, so we count 4315 // element to be inserted for both V1 and V2, and select less one as insert 4316 // target. 4317 4318 // Collect elements need to be inserted and their index. 4319 SmallVector<int, 8> NV1Elt; 4320 SmallVector<int, 8> N1Index; 4321 SmallVector<int, 8> NV2Elt; 4322 SmallVector<int, 8> N2Index; 4323 for (int I = 0; I != Length; ++I) { 4324 if (ShuffleMask[I] != I) { 4325 NV1Elt.push_back(ShuffleMask[I]); 4326 N1Index.push_back(I); 4327 } 4328 } 4329 for (int I = 0; I != Length; ++I) { 4330 if (ShuffleMask[I] != (I + V1EltNum)) { 4331 NV2Elt.push_back(ShuffleMask[I]); 4332 N2Index.push_back(I); 4333 } 4334 } 4335 4336 // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2 4337 // will be inserted. 4338 SDValue InsV = V1; 4339 SmallVector<int, 8> InsMasks = NV1Elt; 4340 SmallVector<int, 8> InsIndex = N1Index; 4341 if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) { 4342 if (NV1Elt.size() > NV2Elt.size()) { 4343 InsV = V2; 4344 InsMasks = NV2Elt; 4345 InsIndex = N2Index; 4346 } 4347 } else { 4348 InsV = DAG.getNode(ISD::UNDEF, dl, VT); 4349 } 4350 4351 for (int I = 0, E = InsMasks.size(); I != E; ++I) { 4352 SDValue ExtV = V1; 4353 int Mask = InsMasks[I]; 4354 if (Mask >= V1EltNum) { 4355 ExtV = V2; 4356 Mask -= V1EltNum; 4357 } 4358 // Any value type smaller than i32 is illegal in AArch64, and this lower 4359 // function is called after legalize pass, so we need to legalize 4360 // the result here. 4361 EVT EltVT; 4362 if (VT.getVectorElementType().isFloatingPoint()) 4363 EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32; 4364 else 4365 EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32; 4366 4367 if (Mask >= 0) { 4368 ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV, 4369 DAG.getConstant(Mask, MVT::i64)); 4370 InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV, 4371 DAG.getConstant(InsIndex[I], MVT::i64)); 4372 } 4373 } 4374 return InsV; 4375} 4376 4377AArch64TargetLowering::ConstraintType 4378AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { 4379 if (Constraint.size() == 1) { 4380 switch (Constraint[0]) { 4381 default: break; 4382 case 'w': // An FP/SIMD vector register 4383 return C_RegisterClass; 4384 case 'I': // Constant that can be used with an ADD instruction 4385 case 'J': // Constant that can be used with a SUB instruction 4386 case 'K': // Constant that can be used with a 32-bit logical instruction 4387 case 'L': // Constant that can be used with a 64-bit logical instruction 4388 case 'M': // Constant that can be used as a 32-bit MOV immediate 4389 case 'N': // Constant that can be used as a 64-bit MOV immediate 4390 case 'Y': // Floating point constant zero 4391 case 'Z': // Integer constant zero 4392 return C_Other; 4393 case 'Q': // A memory reference with base register and no offset 4394 return C_Memory; 4395 case 'S': // A symbolic address 4396 return C_Other; 4397 } 4398 } 4399 4400 // FIXME: Ump, Utf, Usa, Ush 4401 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, 4402 // whatever they may be 4403 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be 4404 // Usa: An absolute symbolic address 4405 // Ush: The high part (bits 32:12) of a pc-relative symbolic address 4406 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa" 4407 && Constraint != "Ush" && "Unimplemented 
constraints"); 4408 4409 return TargetLowering::getConstraintType(Constraint); 4410} 4411 4412TargetLowering::ConstraintWeight 4413AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info, 4414 const char *Constraint) const { 4415 4416 llvm_unreachable("Constraint weight unimplemented"); 4417} 4418 4419void 4420AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4421 std::string &Constraint, 4422 std::vector<SDValue> &Ops, 4423 SelectionDAG &DAG) const { 4424 SDValue Result(0, 0); 4425 4426 // Only length 1 constraints are C_Other. 4427 if (Constraint.size() != 1) return; 4428 4429 // Only C_Other constraints get lowered like this. That means constants for us 4430 // so return early if there's no hope the constraint can be lowered. 4431 4432 switch(Constraint[0]) { 4433 default: break; 4434 case 'I': case 'J': case 'K': case 'L': 4435 case 'M': case 'N': case 'Z': { 4436 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4437 if (!C) 4438 return; 4439 4440 uint64_t CVal = C->getZExtValue(); 4441 uint32_t Bits; 4442 4443 switch (Constraint[0]) { 4444 default: 4445 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J' 4446 // is a peculiarly useless SUB constraint. 4447 llvm_unreachable("Unimplemented C_Other constraint"); 4448 case 'I': 4449 if (CVal <= 0xfff) 4450 break; 4451 return; 4452 case 'K': 4453 if (A64Imms::isLogicalImm(32, CVal, Bits)) 4454 break; 4455 return; 4456 case 'L': 4457 if (A64Imms::isLogicalImm(64, CVal, Bits)) 4458 break; 4459 return; 4460 case 'Z': 4461 if (CVal == 0) 4462 break; 4463 return; 4464 } 4465 4466 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 4467 break; 4468 } 4469 case 'S': { 4470 // An absolute symbolic address or label reference. 4471 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 4472 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 4473 GA->getValueType(0)); 4474 } else if (const BlockAddressSDNode *BA 4475 = dyn_cast<BlockAddressSDNode>(Op)) { 4476 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(), 4477 BA->getValueType(0)); 4478 } else if (const ExternalSymbolSDNode *ES 4479 = dyn_cast<ExternalSymbolSDNode>(Op)) { 4480 Result = DAG.getTargetExternalSymbol(ES->getSymbol(), 4481 ES->getValueType(0)); 4482 } else 4483 return; 4484 break; 4485 } 4486 case 'Y': 4487 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 4488 if (CFP->isExactlyValue(0.0)) { 4489 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0)); 4490 break; 4491 } 4492 } 4493 return; 4494 } 4495 4496 if (Result.getNode()) { 4497 Ops.push_back(Result); 4498 return; 4499 } 4500 4501 // It's an unknown constraint for us. Let generic code have a go. 
4502 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 4503} 4504 4505std::pair<unsigned, const TargetRegisterClass*> 4506AArch64TargetLowering::getRegForInlineAsmConstraint( 4507 const std::string &Constraint, 4508 MVT VT) const { 4509 if (Constraint.size() == 1) { 4510 switch (Constraint[0]) { 4511 case 'r': 4512 if (VT.getSizeInBits() <= 32) 4513 return std::make_pair(0U, &AArch64::GPR32RegClass); 4514 else if (VT == MVT::i64) 4515 return std::make_pair(0U, &AArch64::GPR64RegClass); 4516 break; 4517 case 'w': 4518 if (VT == MVT::f16) 4519 return std::make_pair(0U, &AArch64::FPR16RegClass); 4520 else if (VT == MVT::f32) 4521 return std::make_pair(0U, &AArch64::FPR32RegClass); 4522 else if (VT.getSizeInBits() == 64) 4523 return std::make_pair(0U, &AArch64::FPR64RegClass); 4524 else if (VT.getSizeInBits() == 128) 4525 return std::make_pair(0U, &AArch64::FPR128RegClass); 4526 break; 4527 } 4528 } 4529 4530 // Use the default implementation in TargetLowering to convert the register 4531 // constraint into a member of a register class. 4532 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 4533} 4534 4535/// Represent NEON load and store intrinsics as MemIntrinsicNodes. 4536/// The associated MachineMemOperands record the alignment specified 4537/// in the intrinsic calls. 4538bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 4539 const CallInst &I, 4540 unsigned Intrinsic) const { 4541 switch (Intrinsic) { 4542 case Intrinsic::arm_neon_vld1: 4543 case Intrinsic::arm_neon_vld2: 4544 case Intrinsic::arm_neon_vld3: 4545 case Intrinsic::arm_neon_vld4: 4546 case Intrinsic::aarch64_neon_vld1x2: 4547 case Intrinsic::aarch64_neon_vld1x3: 4548 case Intrinsic::aarch64_neon_vld1x4: 4549 case Intrinsic::arm_neon_vld2lane: 4550 case Intrinsic::arm_neon_vld3lane: 4551 case Intrinsic::arm_neon_vld4lane: { 4552 Info.opc = ISD::INTRINSIC_W_CHAIN; 4553 // Conservatively set memVT to the entire set of vectors loaded. 4554 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 4555 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4556 Info.ptrVal = I.getArgOperand(0); 4557 Info.offset = 0; 4558 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4559 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4560 Info.vol = false; // volatile loads with NEON intrinsics not supported 4561 Info.readMem = true; 4562 Info.writeMem = false; 4563 return true; 4564 } 4565 case Intrinsic::arm_neon_vst1: 4566 case Intrinsic::arm_neon_vst2: 4567 case Intrinsic::arm_neon_vst3: 4568 case Intrinsic::arm_neon_vst4: 4569 case Intrinsic::aarch64_neon_vst1x2: 4570 case Intrinsic::aarch64_neon_vst1x3: 4571 case Intrinsic::aarch64_neon_vst1x4: 4572 case Intrinsic::arm_neon_vst2lane: 4573 case Intrinsic::arm_neon_vst3lane: 4574 case Intrinsic::arm_neon_vst4lane: { 4575 Info.opc = ISD::INTRINSIC_VOID; 4576 // Conservatively set memVT to the entire set of vectors stored. 
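    // Illustrative case: an arm_neon_vst3 of three v4i32 vectors stores 48
    // bytes, so memVT below becomes v6i64 (six 8-byte elements), deliberately
    // covering the whole range touched by the intrinsic.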
4577 unsigned NumElts = 0; 4578 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 4579 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 4580 if (!ArgTy->isVectorTy()) 4581 break; 4582 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 4583 } 4584 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4585 Info.ptrVal = I.getArgOperand(0); 4586 Info.offset = 0; 4587 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4588 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4589 Info.vol = false; // volatile stores with NEON intrinsics not supported 4590 Info.readMem = false; 4591 Info.writeMem = true; 4592 return true; 4593 } 4594 default: 4595 break; 4596 } 4597 4598 return false; 4599} 4600