AArch64ISelLowering.cpp revision f04a4d74b86733b853b7445ab6d5a3bde025a30d
16d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===// 26d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// 36d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// The LLVM Compiler Infrastructure 46d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// 5116680a4aac90f2aa7413d9095a592090648e557Ben Murdoch// This file is distributed under the University of Illinois Open Source 6116680a4aac90f2aa7413d9095a592090648e557Ben Murdoch// License. See LICENSE.TXT for details. 76d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// 86d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)//===----------------------------------------------------------------------===// 96d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// 106d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// This file defines the interfaces that AArch64 uses to lower LLVM code into a 116d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// selection DAG. 126d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)// 136d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)//===----------------------------------------------------------------------===// 146d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 156d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#define DEBUG_TYPE "aarch64-isel" 166d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "AArch64.h" 176d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "AArch64ISelLowering.h" 186d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "AArch64MachineFunctionInfo.h" 196d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "AArch64TargetMachine.h" 206d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "AArch64TargetObjectFile.h" 216d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "Utils/AArch64BaseInfo.h" 226d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/Analysis.h" 236d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/CallingConvLower.h" 246d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/MachineFrameInfo.h" 256d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/MachineInstrBuilder.h" 266d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/MachineRegisterInfo.h" 276d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 286d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)#include "llvm/IR/CallingConv.h" 296d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 306d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)using namespace llvm; 316d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 326d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) { 336d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>(); 346d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 356d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) if (Subtarget->isTargetLinux()) 366d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) return new AArch64LinuxTargetObjectFile(); 376d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) if (Subtarget->isTargetELF()) 386d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) return new TargetLoweringObjectFileELF(); 396d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) llvm_unreachable("unknown subtarget type"); 406d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)} 416d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 426d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles)AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) 436d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) { 446d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 456d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>(); 466d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 476d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) // SIMD compares set the entire lane's bits to 1 486d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 496d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 506d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) // Scalar register <-> type mapping 516d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::i32, &AArch64::GPR32RegClass); 526d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::i64, &AArch64::GPR64RegClass); 536d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 546d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) if (Subtarget->hasFPARMv8()) { 556d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::f16, &AArch64::FPR16RegClass); 566d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::f32, &AArch64::FPR32RegClass); 576d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::f64, &AArch64::FPR64RegClass); 586d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::f128, &AArch64::FPR128RegClass); 596d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) } 606d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) 616d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) if (Subtarget->hasNEON()) { 626d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) // And the vectors 636d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass); 646d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass); 656d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass); 666d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass); 676d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass); 686d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass); 696d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass); 706d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass); 716d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass); 726d86b77056ed63eb6871182f42a9fd5f07550f90Torne (Richard Coles) addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass); 73116680a4aac90f2aa7413d9095a592090648e557Ben Murdoch addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass); 74 addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass); 75 addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass); 76 addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass); 77 addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass); 78 addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass); 79 addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass); 80 } 81 82 computeRegisterProperties(); 83 84 // We combine OR nodes for bitfield and NEON BSL operations. 85 setTargetDAGCombine(ISD::OR); 86 87 setTargetDAGCombine(ISD::AND); 88 setTargetDAGCombine(ISD::SRA); 89 setTargetDAGCombine(ISD::SRL); 90 setTargetDAGCombine(ISD::SHL); 91 92 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 93 setTargetDAGCombine(ISD::INTRINSIC_VOID); 94 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 95 96 // AArch64 does not have i1 loads, or much of anything for i1 really. 97 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 98 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); 99 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); 100 101 setStackPointerRegisterToSaveRestore(AArch64::XSP); 102 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); 103 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 104 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 105 106 // We'll lower globals to wrappers for selection. 107 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 108 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 109 110 // A64 instructions have the comparison predicate attached to the user of the 111 // result, but having a separate comparison is valuable for matching. 112 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 113 setOperationAction(ISD::BR_CC, MVT::i64, Custom); 114 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 115 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 116 117 setOperationAction(ISD::SELECT, MVT::i32, Custom); 118 setOperationAction(ISD::SELECT, MVT::i64, Custom); 119 setOperationAction(ISD::SELECT, MVT::f32, Custom); 120 setOperationAction(ISD::SELECT, MVT::f64, Custom); 121 122 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 123 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); 124 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 125 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 126 127 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 128 129 setOperationAction(ISD::SETCC, MVT::i32, Custom); 130 setOperationAction(ISD::SETCC, MVT::i64, Custom); 131 setOperationAction(ISD::SETCC, MVT::f32, Custom); 132 setOperationAction(ISD::SETCC, MVT::f64, Custom); 133 134 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 135 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 136 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 137 138 setOperationAction(ISD::VASTART, MVT::Other, Custom); 139 setOperationAction(ISD::VACOPY, MVT::Other, Custom); 140 setOperationAction(ISD::VAEND, MVT::Other, Expand); 141 setOperationAction(ISD::VAARG, MVT::Other, Expand); 142 143 setOperationAction(ISD::BlockAddress, MVT::i64, Custom); 144 145 setOperationAction(ISD::ROTL, MVT::i32, Expand); 146 setOperationAction(ISD::ROTL, MVT::i64, Expand); 147 148 setOperationAction(ISD::UREM, MVT::i32, Expand); 149 setOperationAction(ISD::UREM, MVT::i64, Expand); 150 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 151 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 152 153 setOperationAction(ISD::SREM, MVT::i32, Expand); 154 setOperationAction(ISD::SREM, MVT::i64, Expand); 155 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 156 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 157 158 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 159 setOperationAction(ISD::CTPOP, MVT::i64, Expand); 160 161 // Legal floating-point operations. 162 setOperationAction(ISD::FABS, MVT::f32, Legal); 163 setOperationAction(ISD::FABS, MVT::f64, Legal); 164 165 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 166 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 167 168 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 169 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 170 171 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 172 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 173 174 setOperationAction(ISD::FNEG, MVT::f32, Legal); 175 setOperationAction(ISD::FNEG, MVT::f64, Legal); 176 177 setOperationAction(ISD::FRINT, MVT::f32, Legal); 178 setOperationAction(ISD::FRINT, MVT::f64, Legal); 179 180 setOperationAction(ISD::FSQRT, MVT::f32, Legal); 181 setOperationAction(ISD::FSQRT, MVT::f64, Legal); 182 183 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 184 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 185 186 setOperationAction(ISD::ConstantFP, MVT::f32, Legal); 187 setOperationAction(ISD::ConstantFP, MVT::f64, Legal); 188 setOperationAction(ISD::ConstantFP, MVT::f128, Legal); 189 190 // Illegal floating-point operations. 191 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 192 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 193 194 setOperationAction(ISD::FCOS, MVT::f32, Expand); 195 setOperationAction(ISD::FCOS, MVT::f64, Expand); 196 197 setOperationAction(ISD::FEXP, MVT::f32, Expand); 198 setOperationAction(ISD::FEXP, MVT::f64, Expand); 199 200 setOperationAction(ISD::FEXP2, MVT::f32, Expand); 201 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 202 203 setOperationAction(ISD::FLOG, MVT::f32, Expand); 204 setOperationAction(ISD::FLOG, MVT::f64, Expand); 205 206 setOperationAction(ISD::FLOG2, MVT::f32, Expand); 207 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 208 209 setOperationAction(ISD::FLOG10, MVT::f32, Expand); 210 setOperationAction(ISD::FLOG10, MVT::f64, Expand); 211 212 setOperationAction(ISD::FPOW, MVT::f32, Expand); 213 setOperationAction(ISD::FPOW, MVT::f64, Expand); 214 215 setOperationAction(ISD::FPOWI, MVT::f32, Expand); 216 setOperationAction(ISD::FPOWI, MVT::f64, Expand); 217 218 setOperationAction(ISD::FREM, MVT::f32, Expand); 219 setOperationAction(ISD::FREM, MVT::f64, Expand); 220 221 setOperationAction(ISD::FSIN, MVT::f32, Expand); 222 setOperationAction(ISD::FSIN, MVT::f64, Expand); 223 224 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 225 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 226 227 // Virtually no operation on f128 is legal, but LLVM can't expand them when 228 // there's a valid register class, so we need custom operations in most cases. 229 setOperationAction(ISD::FABS, MVT::f128, Expand); 230 setOperationAction(ISD::FADD, MVT::f128, Custom); 231 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); 232 setOperationAction(ISD::FCOS, MVT::f128, Expand); 233 setOperationAction(ISD::FDIV, MVT::f128, Custom); 234 setOperationAction(ISD::FMA, MVT::f128, Expand); 235 setOperationAction(ISD::FMUL, MVT::f128, Custom); 236 setOperationAction(ISD::FNEG, MVT::f128, Expand); 237 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand); 238 setOperationAction(ISD::FP_ROUND, MVT::f128, Expand); 239 setOperationAction(ISD::FPOW, MVT::f128, Expand); 240 setOperationAction(ISD::FREM, MVT::f128, Expand); 241 setOperationAction(ISD::FRINT, MVT::f128, Expand); 242 setOperationAction(ISD::FSIN, MVT::f128, Expand); 243 setOperationAction(ISD::FSINCOS, MVT::f128, Expand); 244 setOperationAction(ISD::FSQRT, MVT::f128, Expand); 245 setOperationAction(ISD::FSUB, MVT::f128, Custom); 246 setOperationAction(ISD::FTRUNC, MVT::f128, Expand); 247 setOperationAction(ISD::SETCC, MVT::f128, Custom); 248 setOperationAction(ISD::BR_CC, MVT::f128, Custom); 249 setOperationAction(ISD::SELECT, MVT::f128, Expand); 250 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); 251 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); 252 253 // Lowering for many of the conversions is actually specified by the non-f128 254 // type. The LowerXXX function will be trivial when f128 isn't involved. 255 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 256 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 257 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom); 258 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 259 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 260 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom); 261 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 262 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 263 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom); 264 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 265 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 266 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); 267 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 268 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); 269 270 // This prevents LLVM trying to compress double constants into a floating 271 // constant-pool entry and trying to load from there. It's of doubtful benefit 272 // for A64: we'd need LDR followed by FCVT, I believe. 273 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand); 274 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); 275 setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand); 276 277 setTruncStoreAction(MVT::f128, MVT::f64, Expand); 278 setTruncStoreAction(MVT::f128, MVT::f32, Expand); 279 setTruncStoreAction(MVT::f128, MVT::f16, Expand); 280 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 281 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 282 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 283 284 setExceptionPointerRegister(AArch64::X0); 285 setExceptionSelectorRegister(AArch64::X1); 286 287 if (Subtarget->hasNEON()) { 288 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom); 289 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom); 290 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 291 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom); 292 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); 293 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 294 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom); 295 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom); 296 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 297 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom); 298 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 299 setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom); 300 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom); 301 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 302 setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom); 303 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 304 305 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); 306 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 307 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); 308 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom); 309 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom); 310 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom); 311 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom); 312 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 313 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom); 314 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 315 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom); 316 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 317 318 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal); 319 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal); 320 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal); 321 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal); 322 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal); 323 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal); 324 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal); 325 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal); 326 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal); 327 328 setOperationAction(ISD::SETCC, MVT::v8i8, Custom); 329 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 330 setOperationAction(ISD::SETCC, MVT::v4i16, Custom); 331 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 332 setOperationAction(ISD::SETCC, MVT::v2i32, Custom); 333 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 334 setOperationAction(ISD::SETCC, MVT::v1i64, Custom); 335 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 336 setOperationAction(ISD::SETCC, MVT::v1f32, Custom); 337 setOperationAction(ISD::SETCC, MVT::v2f32, Custom); 338 setOperationAction(ISD::SETCC, MVT::v4f32, Custom); 339 setOperationAction(ISD::SETCC, MVT::v1f64, Custom); 340 setOperationAction(ISD::SETCC, MVT::v2f64, Custom); 341 342 setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal); 343 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 344 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); 345 346 setOperationAction(ISD::FCEIL, MVT::v2f32, Legal); 347 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); 348 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); 349 350 setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal); 351 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); 352 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); 353 354 setOperationAction(ISD::FRINT, MVT::v2f32, Legal); 355 setOperationAction(ISD::FRINT, MVT::v4f32, Legal); 356 setOperationAction(ISD::FRINT, MVT::v2f64, Legal); 357 358 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal); 359 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); 360 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); 361 362 setOperationAction(ISD::FROUND, MVT::v2f32, Legal); 363 setOperationAction(ISD::FROUND, MVT::v4f32, Legal); 364 setOperationAction(ISD::FROUND, MVT::v2f64, Legal); 365 } 366} 367 368EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { 369 // It's reasonably important that this value matches the "natural" legal 370 // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself 371 // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64). 372 if (!VT.isVector()) return MVT::i32; 373 return VT.changeVectorElementTypeToInteger(); 374} 375 376static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord, 377 unsigned &LdrOpc, 378 unsigned &StrOpc) { 379 static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword, 380 AArch64::LDXR_word, AArch64::LDXR_dword}; 381 static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword, 382 AArch64::LDAXR_word, AArch64::LDAXR_dword}; 383 static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword, 384 AArch64::STXR_word, AArch64::STXR_dword}; 385 static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword, 386 AArch64::STLXR_word, AArch64::STLXR_dword}; 387 388 const unsigned *LoadOps, *StoreOps; 389 if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent) 390 LoadOps = LoadAcqs; 391 else 392 LoadOps = LoadBares; 393 394 if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent) 395 StoreOps = StoreRels; 396 else 397 StoreOps = StoreBares; 398 399 assert(isPowerOf2_32(Size) && Size <= 8 && 400 "unsupported size for atomic binary op!"); 401 402 LdrOpc = LoadOps[Log2_32(Size)]; 403 StrOpc = StoreOps[Log2_32(Size)]; 404} 405 406// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really 407// have value type mapped, and they are both being defined as MVT::untyped. 408// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost 409// would fail to figure out the register pressure correctly. 410std::pair<const TargetRegisterClass*, uint8_t> 411AArch64TargetLowering::findRepresentativeClass(MVT VT) const{ 412 const TargetRegisterClass *RRC = 0; 413 uint8_t Cost = 1; 414 switch (VT.SimpleTy) { 415 default: 416 return TargetLowering::findRepresentativeClass(VT); 417 case MVT::v4i64: 418 RRC = &AArch64::QPairRegClass; 419 Cost = 2; 420 break; 421 case MVT::v8i64: 422 RRC = &AArch64::QQuadRegClass; 423 Cost = 4; 424 break; 425 } 426 return std::make_pair(RRC, Cost); 427} 428 429MachineBasicBlock * 430AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 431 unsigned Size, 432 unsigned BinOpcode) const { 433 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 434 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 435 436 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 437 MachineFunction *MF = BB->getParent(); 438 MachineFunction::iterator It = BB; 439 ++It; 440 441 unsigned dest = MI->getOperand(0).getReg(); 442 unsigned ptr = MI->getOperand(1).getReg(); 443 unsigned incr = MI->getOperand(2).getReg(); 444 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); 445 DebugLoc dl = MI->getDebugLoc(); 446 447 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 448 449 unsigned ldrOpc, strOpc; 450 getExclusiveOperation(Size, Ord, ldrOpc, strOpc); 451 452 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 453 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 454 MF->insert(It, loopMBB); 455 MF->insert(It, exitMBB); 456 457 // Transfer the remainder of BB and its successor edges to exitMBB. 458 exitMBB->splice(exitMBB->begin(), BB, 459 llvm::next(MachineBasicBlock::iterator(MI)), 460 BB->end()); 461 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 462 463 const TargetRegisterClass *TRC 464 = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; 465 unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 466 467 // thisMBB: 468 // ... 469 // fallthrough --> loopMBB 470 BB->addSuccessor(loopMBB); 471 472 // loopMBB: 473 // ldxr dest, ptr 474 // <binop> scratch, dest, incr 475 // stxr stxr_status, scratch, ptr 476 // cbnz stxr_status, loopMBB 477 // fallthrough --> exitMBB 478 BB = loopMBB; 479 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 480 if (BinOpcode) { 481 // All arithmetic operations we'll be creating are designed to take an extra 482 // shift or extend operand, which we can conveniently set to zero. 483 484 // Operand order needs to go the other way for NAND. 485 if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl) 486 BuildMI(BB, dl, TII->get(BinOpcode), scratch) 487 .addReg(incr).addReg(dest).addImm(0); 488 else 489 BuildMI(BB, dl, TII->get(BinOpcode), scratch) 490 .addReg(dest).addReg(incr).addImm(0); 491 } 492 493 // From the stxr, the register is GPR32; from the cmp it's GPR32wsp 494 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); 495 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); 496 497 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr); 498 BuildMI(BB, dl, TII->get(AArch64::CBNZw)) 499 .addReg(stxr_status).addMBB(loopMBB); 500 501 BB->addSuccessor(loopMBB); 502 BB->addSuccessor(exitMBB); 503 504 // exitMBB: 505 // ... 506 BB = exitMBB; 507 508 MI->eraseFromParent(); // The instruction is gone now. 509 510 return BB; 511} 512 513MachineBasicBlock * 514AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI, 515 MachineBasicBlock *BB, 516 unsigned Size, 517 unsigned CmpOp, 518 A64CC::CondCodes Cond) const { 519 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 520 521 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 522 MachineFunction *MF = BB->getParent(); 523 MachineFunction::iterator It = BB; 524 ++It; 525 526 unsigned dest = MI->getOperand(0).getReg(); 527 unsigned ptr = MI->getOperand(1).getReg(); 528 unsigned incr = MI->getOperand(2).getReg(); 529 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); 530 531 unsigned oldval = dest; 532 DebugLoc dl = MI->getDebugLoc(); 533 534 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 535 const TargetRegisterClass *TRC, *TRCsp; 536 if (Size == 8) { 537 TRC = &AArch64::GPR64RegClass; 538 TRCsp = &AArch64::GPR64xspRegClass; 539 } else { 540 TRC = &AArch64::GPR32RegClass; 541 TRCsp = &AArch64::GPR32wspRegClass; 542 } 543 544 unsigned ldrOpc, strOpc; 545 getExclusiveOperation(Size, Ord, ldrOpc, strOpc); 546 547 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 548 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 549 MF->insert(It, loopMBB); 550 MF->insert(It, exitMBB); 551 552 // Transfer the remainder of BB and its successor edges to exitMBB. 553 exitMBB->splice(exitMBB->begin(), BB, 554 llvm::next(MachineBasicBlock::iterator(MI)), 555 BB->end()); 556 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 557 558 unsigned scratch = MRI.createVirtualRegister(TRC); 559 MRI.constrainRegClass(scratch, TRCsp); 560 561 // thisMBB: 562 // ... 563 // fallthrough --> loopMBB 564 BB->addSuccessor(loopMBB); 565 566 // loopMBB: 567 // ldxr dest, ptr 568 // cmp incr, dest (, sign extend if necessary) 569 // csel scratch, dest, incr, cond 570 // stxr stxr_status, scratch, ptr 571 // cbnz stxr_status, loopMBB 572 // fallthrough --> exitMBB 573 BB = loopMBB; 574 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 575 576 // Build compare and cmov instructions. 577 MRI.constrainRegClass(incr, TRCsp); 578 BuildMI(BB, dl, TII->get(CmpOp)) 579 .addReg(incr).addReg(oldval).addImm(0); 580 581 BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc), 582 scratch) 583 .addReg(oldval).addReg(incr).addImm(Cond); 584 585 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); 586 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); 587 588 BuildMI(BB, dl, TII->get(strOpc), stxr_status) 589 .addReg(scratch).addReg(ptr); 590 BuildMI(BB, dl, TII->get(AArch64::CBNZw)) 591 .addReg(stxr_status).addMBB(loopMBB); 592 593 BB->addSuccessor(loopMBB); 594 BB->addSuccessor(exitMBB); 595 596 // exitMBB: 597 // ... 598 BB = exitMBB; 599 600 MI->eraseFromParent(); // The instruction is gone now. 601 602 return BB; 603} 604 605MachineBasicBlock * 606AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI, 607 MachineBasicBlock *BB, 608 unsigned Size) const { 609 unsigned dest = MI->getOperand(0).getReg(); 610 unsigned ptr = MI->getOperand(1).getReg(); 611 unsigned oldval = MI->getOperand(2).getReg(); 612 unsigned newval = MI->getOperand(3).getReg(); 613 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm()); 614 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 615 DebugLoc dl = MI->getDebugLoc(); 616 617 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 618 const TargetRegisterClass *TRCsp; 619 TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass; 620 621 unsigned ldrOpc, strOpc; 622 getExclusiveOperation(Size, Ord, ldrOpc, strOpc); 623 624 MachineFunction *MF = BB->getParent(); 625 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 626 MachineFunction::iterator It = BB; 627 ++It; // insert the new blocks after the current block 628 629 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 630 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 631 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 632 MF->insert(It, loop1MBB); 633 MF->insert(It, loop2MBB); 634 MF->insert(It, exitMBB); 635 636 // Transfer the remainder of BB and its successor edges to exitMBB. 637 exitMBB->splice(exitMBB->begin(), BB, 638 llvm::next(MachineBasicBlock::iterator(MI)), 639 BB->end()); 640 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 641 642 // thisMBB: 643 // ... 644 // fallthrough --> loop1MBB 645 BB->addSuccessor(loop1MBB); 646 647 // loop1MBB: 648 // ldxr dest, [ptr] 649 // cmp dest, oldval 650 // b.ne exitMBB 651 BB = loop1MBB; 652 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 653 654 unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl; 655 MRI.constrainRegClass(dest, TRCsp); 656 BuildMI(BB, dl, TII->get(CmpOp)) 657 .addReg(dest).addReg(oldval).addImm(0); 658 BuildMI(BB, dl, TII->get(AArch64::Bcc)) 659 .addImm(A64CC::NE).addMBB(exitMBB); 660 BB->addSuccessor(loop2MBB); 661 BB->addSuccessor(exitMBB); 662 663 // loop2MBB: 664 // strex stxr_status, newval, [ptr] 665 // cbnz stxr_status, loop1MBB 666 BB = loop2MBB; 667 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); 668 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); 669 670 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr); 671 BuildMI(BB, dl, TII->get(AArch64::CBNZw)) 672 .addReg(stxr_status).addMBB(loop1MBB); 673 BB->addSuccessor(loop1MBB); 674 BB->addSuccessor(exitMBB); 675 676 // exitMBB: 677 // ... 678 BB = exitMBB; 679 680 MI->eraseFromParent(); // The instruction is gone now. 681 682 return BB; 683} 684 685MachineBasicBlock * 686AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI, 687 MachineBasicBlock *MBB) const { 688 // We materialise the F128CSEL pseudo-instruction using conditional branches 689 // and loads, giving an instruciton sequence like: 690 // str q0, [sp] 691 // b.ne IfTrue 692 // b Finish 693 // IfTrue: 694 // str q1, [sp] 695 // Finish: 696 // ldr q0, [sp] 697 // 698 // Using virtual registers would probably not be beneficial since COPY 699 // instructions are expensive for f128 (there's no actual instruction to 700 // implement them). 701 // 702 // An alternative would be to do an integer-CSEL on some address. E.g.: 703 // mov x0, sp 704 // add x1, sp, #16 705 // str q0, [x0] 706 // str q1, [x1] 707 // csel x0, x0, x1, ne 708 // ldr q0, [x0] 709 // 710 // It's unclear which approach is actually optimal. 711 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 712 MachineFunction *MF = MBB->getParent(); 713 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 714 DebugLoc DL = MI->getDebugLoc(); 715 MachineFunction::iterator It = MBB; 716 ++It; 717 718 unsigned DestReg = MI->getOperand(0).getReg(); 719 unsigned IfTrueReg = MI->getOperand(1).getReg(); 720 unsigned IfFalseReg = MI->getOperand(2).getReg(); 721 unsigned CondCode = MI->getOperand(3).getImm(); 722 bool NZCVKilled = MI->getOperand(4).isKill(); 723 724 MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB); 725 MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB); 726 MF->insert(It, TrueBB); 727 MF->insert(It, EndBB); 728 729 // Transfer rest of current basic-block to EndBB 730 EndBB->splice(EndBB->begin(), MBB, 731 llvm::next(MachineBasicBlock::iterator(MI)), 732 MBB->end()); 733 EndBB->transferSuccessorsAndUpdatePHIs(MBB); 734 735 // We need somewhere to store the f128 value needed. 736 int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16); 737 738 // [... start of incoming MBB ...] 739 // str qIFFALSE, [sp] 740 // b.cc IfTrue 741 // b Done 742 BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR)) 743 .addReg(IfFalseReg) 744 .addFrameIndex(ScratchFI) 745 .addImm(0); 746 BuildMI(MBB, DL, TII->get(AArch64::Bcc)) 747 .addImm(CondCode) 748 .addMBB(TrueBB); 749 BuildMI(MBB, DL, TII->get(AArch64::Bimm)) 750 .addMBB(EndBB); 751 MBB->addSuccessor(TrueBB); 752 MBB->addSuccessor(EndBB); 753 754 if (!NZCVKilled) { 755 // NZCV is live-through TrueBB. 756 TrueBB->addLiveIn(AArch64::NZCV); 757 EndBB->addLiveIn(AArch64::NZCV); 758 } 759 760 // IfTrue: 761 // str qIFTRUE, [sp] 762 BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR)) 763 .addReg(IfTrueReg) 764 .addFrameIndex(ScratchFI) 765 .addImm(0); 766 767 // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the 768 // blocks. 769 TrueBB->addSuccessor(EndBB); 770 771 // Done: 772 // ldr qDEST, [sp] 773 // [... rest of incoming MBB ...] 774 MachineInstr *StartOfEnd = EndBB->begin(); 775 BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg) 776 .addFrameIndex(ScratchFI) 777 .addImm(0); 778 779 MI->eraseFromParent(); 780 return EndBB; 781} 782 783MachineBasicBlock * 784AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 785 MachineBasicBlock *MBB) const { 786 switch (MI->getOpcode()) { 787 default: llvm_unreachable("Unhandled instruction with custom inserter"); 788 case AArch64::F128CSEL: 789 return EmitF128CSEL(MI, MBB); 790 case AArch64::ATOMIC_LOAD_ADD_I8: 791 return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl); 792 case AArch64::ATOMIC_LOAD_ADD_I16: 793 return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl); 794 case AArch64::ATOMIC_LOAD_ADD_I32: 795 return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl); 796 case AArch64::ATOMIC_LOAD_ADD_I64: 797 return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl); 798 799 case AArch64::ATOMIC_LOAD_SUB_I8: 800 return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl); 801 case AArch64::ATOMIC_LOAD_SUB_I16: 802 return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl); 803 case AArch64::ATOMIC_LOAD_SUB_I32: 804 return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl); 805 case AArch64::ATOMIC_LOAD_SUB_I64: 806 return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl); 807 808 case AArch64::ATOMIC_LOAD_AND_I8: 809 return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl); 810 case AArch64::ATOMIC_LOAD_AND_I16: 811 return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl); 812 case AArch64::ATOMIC_LOAD_AND_I32: 813 return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl); 814 case AArch64::ATOMIC_LOAD_AND_I64: 815 return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl); 816 817 case AArch64::ATOMIC_LOAD_OR_I8: 818 return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl); 819 case AArch64::ATOMIC_LOAD_OR_I16: 820 return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl); 821 case AArch64::ATOMIC_LOAD_OR_I32: 822 return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl); 823 case AArch64::ATOMIC_LOAD_OR_I64: 824 return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl); 825 826 case AArch64::ATOMIC_LOAD_XOR_I8: 827 return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl); 828 case AArch64::ATOMIC_LOAD_XOR_I16: 829 return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl); 830 case AArch64::ATOMIC_LOAD_XOR_I32: 831 return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl); 832 case AArch64::ATOMIC_LOAD_XOR_I64: 833 return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl); 834 835 case AArch64::ATOMIC_LOAD_NAND_I8: 836 return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl); 837 case AArch64::ATOMIC_LOAD_NAND_I16: 838 return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl); 839 case AArch64::ATOMIC_LOAD_NAND_I32: 840 return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl); 841 case AArch64::ATOMIC_LOAD_NAND_I64: 842 return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl); 843 844 case AArch64::ATOMIC_LOAD_MIN_I8: 845 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT); 846 case AArch64::ATOMIC_LOAD_MIN_I16: 847 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT); 848 case AArch64::ATOMIC_LOAD_MIN_I32: 849 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT); 850 case AArch64::ATOMIC_LOAD_MIN_I64: 851 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT); 852 853 case AArch64::ATOMIC_LOAD_MAX_I8: 854 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT); 855 case AArch64::ATOMIC_LOAD_MAX_I16: 856 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT); 857 case AArch64::ATOMIC_LOAD_MAX_I32: 858 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT); 859 case AArch64::ATOMIC_LOAD_MAX_I64: 860 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT); 861 862 case AArch64::ATOMIC_LOAD_UMIN_I8: 863 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI); 864 case AArch64::ATOMIC_LOAD_UMIN_I16: 865 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI); 866 case AArch64::ATOMIC_LOAD_UMIN_I32: 867 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI); 868 case AArch64::ATOMIC_LOAD_UMIN_I64: 869 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI); 870 871 case AArch64::ATOMIC_LOAD_UMAX_I8: 872 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO); 873 case AArch64::ATOMIC_LOAD_UMAX_I16: 874 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO); 875 case AArch64::ATOMIC_LOAD_UMAX_I32: 876 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO); 877 case AArch64::ATOMIC_LOAD_UMAX_I64: 878 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO); 879 880 case AArch64::ATOMIC_SWAP_I8: 881 return emitAtomicBinary(MI, MBB, 1, 0); 882 case AArch64::ATOMIC_SWAP_I16: 883 return emitAtomicBinary(MI, MBB, 2, 0); 884 case AArch64::ATOMIC_SWAP_I32: 885 return emitAtomicBinary(MI, MBB, 4, 0); 886 case AArch64::ATOMIC_SWAP_I64: 887 return emitAtomicBinary(MI, MBB, 8, 0); 888 889 case AArch64::ATOMIC_CMP_SWAP_I8: 890 return emitAtomicCmpSwap(MI, MBB, 1); 891 case AArch64::ATOMIC_CMP_SWAP_I16: 892 return emitAtomicCmpSwap(MI, MBB, 2); 893 case AArch64::ATOMIC_CMP_SWAP_I32: 894 return emitAtomicCmpSwap(MI, MBB, 4); 895 case AArch64::ATOMIC_CMP_SWAP_I64: 896 return emitAtomicCmpSwap(MI, MBB, 8); 897 } 898} 899 900 901const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { 902 switch (Opcode) { 903 case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC"; 904 case AArch64ISD::Call: return "AArch64ISD::Call"; 905 case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV"; 906 case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad"; 907 case AArch64ISD::BFI: return "AArch64ISD::BFI"; 908 case AArch64ISD::EXTR: return "AArch64ISD::EXTR"; 909 case AArch64ISD::Ret: return "AArch64ISD::Ret"; 910 case AArch64ISD::SBFX: return "AArch64ISD::SBFX"; 911 case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC"; 912 case AArch64ISD::SETCC: return "AArch64ISD::SETCC"; 913 case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN"; 914 case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER"; 915 case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL"; 916 case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; 917 case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall"; 918 919 case AArch64ISD::NEON_BSL: 920 return "AArch64ISD::NEON_BSL"; 921 case AArch64ISD::NEON_MOVIMM: 922 return "AArch64ISD::NEON_MOVIMM"; 923 case AArch64ISD::NEON_MVNIMM: 924 return "AArch64ISD::NEON_MVNIMM"; 925 case AArch64ISD::NEON_FMOVIMM: 926 return "AArch64ISD::NEON_FMOVIMM"; 927 case AArch64ISD::NEON_CMP: 928 return "AArch64ISD::NEON_CMP"; 929 case AArch64ISD::NEON_CMPZ: 930 return "AArch64ISD::NEON_CMPZ"; 931 case AArch64ISD::NEON_TST: 932 return "AArch64ISD::NEON_TST"; 933 case AArch64ISD::NEON_QSHLs: 934 return "AArch64ISD::NEON_QSHLs"; 935 case AArch64ISD::NEON_QSHLu: 936 return "AArch64ISD::NEON_QSHLu"; 937 case AArch64ISD::NEON_VDUP: 938 return "AArch64ISD::NEON_VDUP"; 939 case AArch64ISD::NEON_VDUPLANE: 940 return "AArch64ISD::NEON_VDUPLANE"; 941 case AArch64ISD::NEON_REV16: 942 return "AArch64ISD::NEON_REV16"; 943 case AArch64ISD::NEON_REV32: 944 return "AArch64ISD::NEON_REV32"; 945 case AArch64ISD::NEON_REV64: 946 return "AArch64ISD::NEON_REV64"; 947 case AArch64ISD::NEON_UZP1: 948 return "AArch64ISD::NEON_UZP1"; 949 case AArch64ISD::NEON_UZP2: 950 return "AArch64ISD::NEON_UZP2"; 951 case AArch64ISD::NEON_ZIP1: 952 return "AArch64ISD::NEON_ZIP1"; 953 case AArch64ISD::NEON_ZIP2: 954 return "AArch64ISD::NEON_ZIP2"; 955 case AArch64ISD::NEON_TRN1: 956 return "AArch64ISD::NEON_TRN1"; 957 case AArch64ISD::NEON_TRN2: 958 return "AArch64ISD::NEON_TRN2"; 959 case AArch64ISD::NEON_LD1_UPD: 960 return "AArch64ISD::NEON_LD1_UPD"; 961 case AArch64ISD::NEON_LD2_UPD: 962 return "AArch64ISD::NEON_LD2_UPD"; 963 case AArch64ISD::NEON_LD3_UPD: 964 return "AArch64ISD::NEON_LD3_UPD"; 965 case AArch64ISD::NEON_LD4_UPD: 966 return "AArch64ISD::NEON_LD4_UPD"; 967 case AArch64ISD::NEON_ST1_UPD: 968 return "AArch64ISD::NEON_ST1_UPD"; 969 case AArch64ISD::NEON_ST2_UPD: 970 return "AArch64ISD::NEON_ST2_UPD"; 971 case AArch64ISD::NEON_ST3_UPD: 972 return "AArch64ISD::NEON_ST3_UPD"; 973 case AArch64ISD::NEON_ST4_UPD: 974 return "AArch64ISD::NEON_ST4_UPD"; 975 case AArch64ISD::NEON_LD1x2_UPD: 976 return "AArch64ISD::NEON_LD1x2_UPD"; 977 case AArch64ISD::NEON_LD1x3_UPD: 978 return "AArch64ISD::NEON_LD1x3_UPD"; 979 case AArch64ISD::NEON_LD1x4_UPD: 980 return "AArch64ISD::NEON_LD1x4_UPD"; 981 case AArch64ISD::NEON_ST1x2_UPD: 982 return "AArch64ISD::NEON_ST1x2_UPD"; 983 case AArch64ISD::NEON_ST1x3_UPD: 984 return "AArch64ISD::NEON_ST1x3_UPD"; 985 case AArch64ISD::NEON_ST1x4_UPD: 986 return "AArch64ISD::NEON_ST1x4_UPD"; 987 case AArch64ISD::NEON_LD2DUP: 988 return "AArch64ISD::NEON_LD2DUP"; 989 case AArch64ISD::NEON_LD3DUP: 990 return "AArch64ISD::NEON_LD3DUP"; 991 case AArch64ISD::NEON_LD4DUP: 992 return "AArch64ISD::NEON_LD4DUP"; 993 case AArch64ISD::NEON_LD2DUP_UPD: 994 return "AArch64ISD::NEON_LD2DUP_UPD"; 995 case AArch64ISD::NEON_LD3DUP_UPD: 996 return "AArch64ISD::NEON_LD3DUP_UPD"; 997 case AArch64ISD::NEON_LD4DUP_UPD: 998 return "AArch64ISD::NEON_LD4DUP_UPD"; 999 case AArch64ISD::NEON_LD2LN_UPD: 1000 return "AArch64ISD::NEON_LD2LN_UPD"; 1001 case AArch64ISD::NEON_LD3LN_UPD: 1002 return "AArch64ISD::NEON_LD3LN_UPD"; 1003 case AArch64ISD::NEON_LD4LN_UPD: 1004 return "AArch64ISD::NEON_LD4LN_UPD"; 1005 case AArch64ISD::NEON_ST2LN_UPD: 1006 return "AArch64ISD::NEON_ST2LN_UPD"; 1007 case AArch64ISD::NEON_ST3LN_UPD: 1008 return "AArch64ISD::NEON_ST3LN_UPD"; 1009 case AArch64ISD::NEON_ST4LN_UPD: 1010 return "AArch64ISD::NEON_ST4LN_UPD"; 1011 case AArch64ISD::NEON_VEXTRACT: 1012 return "AArch64ISD::NEON_VEXTRACT"; 1013 default: 1014 return NULL; 1015 } 1016} 1017 1018static const uint16_t AArch64FPRArgRegs[] = { 1019 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, 1020 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7 1021}; 1022static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs); 1023 1024static const uint16_t AArch64ArgRegs[] = { 1025 AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, 1026 AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7 1027}; 1028static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs); 1029 1030static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT, 1031 CCValAssign::LocInfo LocInfo, 1032 ISD::ArgFlagsTy ArgFlags, CCState &State) { 1033 // Mark all remaining general purpose registers as allocated. We don't 1034 // backtrack: if (for example) an i128 gets put on the stack, no subsequent 1035 // i64 will go in registers (C.11). 1036 for (unsigned i = 0; i < NumArgRegs; ++i) 1037 State.AllocateReg(AArch64ArgRegs[i]); 1038 1039 return false; 1040} 1041 1042#include "AArch64GenCallingConv.inc" 1043 1044CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const { 1045 1046 switch(CC) { 1047 default: llvm_unreachable("Unsupported calling convention"); 1048 case CallingConv::Fast: 1049 case CallingConv::C: 1050 return CC_A64_APCS; 1051 } 1052} 1053 1054void 1055AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, 1056 SDLoc DL, SDValue &Chain) const { 1057 MachineFunction &MF = DAG.getMachineFunction(); 1058 MachineFrameInfo *MFI = MF.getFrameInfo(); 1059 AArch64MachineFunctionInfo *FuncInfo 1060 = MF.getInfo<AArch64MachineFunctionInfo>(); 1061 1062 SmallVector<SDValue, 8> MemOps; 1063 1064 unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs, 1065 NumArgRegs); 1066 unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs, 1067 NumFPRArgRegs); 1068 1069 unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR); 1070 int GPRIdx = 0; 1071 if (GPRSaveSize != 0) { 1072 GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false); 1073 1074 SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy()); 1075 1076 for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) { 1077 unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass); 1078 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); 1079 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN, 1080 MachinePointerInfo::getStack(i * 8), 1081 false, false, 0); 1082 MemOps.push_back(Store); 1083 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, 1084 DAG.getConstant(8, getPointerTy())); 1085 } 1086 } 1087 1088 if (getSubtarget()->hasFPARMv8()) { 1089 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); 1090 int FPRIdx = 0; 1091 // According to the AArch64 Procedure Call Standard, section B.1/B.3, we 1092 // can omit a register save area if we know we'll never use registers of 1093 // that class. 1094 if (FPRSaveSize != 0) { 1095 FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false); 1096 1097 SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy()); 1098 1099 for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { 1100 unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i], 1101 &AArch64::FPR128RegClass); 1102 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); 1103 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN, 1104 MachinePointerInfo::getStack(i * 16), 1105 false, false, 0); 1106 MemOps.push_back(Store); 1107 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, 1108 DAG.getConstant(16, getPointerTy())); 1109 } 1110 } 1111 FuncInfo->setVariadicFPRIdx(FPRIdx); 1112 FuncInfo->setVariadicFPRSize(FPRSaveSize); 1113 } 1114 1115 int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true); 1116 1117 FuncInfo->setVariadicStackIdx(StackIdx); 1118 FuncInfo->setVariadicGPRIdx(GPRIdx); 1119 FuncInfo->setVariadicGPRSize(GPRSaveSize); 1120 1121 if (!MemOps.empty()) { 1122 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], 1123 MemOps.size()); 1124 } 1125} 1126 1127 1128SDValue 1129AArch64TargetLowering::LowerFormalArguments(SDValue Chain, 1130 CallingConv::ID CallConv, bool isVarArg, 1131 const SmallVectorImpl<ISD::InputArg> &Ins, 1132 SDLoc dl, SelectionDAG &DAG, 1133 SmallVectorImpl<SDValue> &InVals) const { 1134 MachineFunction &MF = DAG.getMachineFunction(); 1135 AArch64MachineFunctionInfo *FuncInfo 1136 = MF.getInfo<AArch64MachineFunctionInfo>(); 1137 MachineFrameInfo *MFI = MF.getFrameInfo(); 1138 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 1139 1140 SmallVector<CCValAssign, 16> ArgLocs; 1141 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1142 getTargetMachine(), ArgLocs, *DAG.getContext()); 1143 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv)); 1144 1145 SmallVector<SDValue, 16> ArgValues; 1146 1147 SDValue ArgValue; 1148 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1149 CCValAssign &VA = ArgLocs[i]; 1150 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1151 1152 if (Flags.isByVal()) { 1153 // Byval is used for small structs and HFAs in the PCS, but the system 1154 // should work in a non-compliant manner for larger structs. 1155 EVT PtrTy = getPointerTy(); 1156 int Size = Flags.getByValSize(); 1157 unsigned NumRegs = (Size + 7) / 8; 1158 1159 unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs, 1160 VA.getLocMemOffset(), 1161 false); 1162 SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy); 1163 InVals.push_back(FrameIdxN); 1164 1165 continue; 1166 } else if (VA.isRegLoc()) { 1167 MVT RegVT = VA.getLocVT(); 1168 const TargetRegisterClass *RC = getRegClassFor(RegVT); 1169 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1170 1171 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1172 } else { // VA.isRegLoc() 1173 assert(VA.isMemLoc()); 1174 1175 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 1176 VA.getLocMemOffset(), true); 1177 1178 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1179 ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, 1180 MachinePointerInfo::getFixedStack(FI), 1181 false, false, false, 0); 1182 1183 1184 } 1185 1186 switch (VA.getLocInfo()) { 1187 default: llvm_unreachable("Unknown loc info!"); 1188 case CCValAssign::Full: break; 1189 case CCValAssign::BCvt: 1190 ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue); 1191 break; 1192 case CCValAssign::SExt: 1193 case CCValAssign::ZExt: 1194 case CCValAssign::AExt: { 1195 unsigned DestSize = VA.getValVT().getSizeInBits(); 1196 unsigned DestSubReg; 1197 1198 switch (DestSize) { 1199 case 8: DestSubReg = AArch64::sub_8; break; 1200 case 16: DestSubReg = AArch64::sub_16; break; 1201 case 32: DestSubReg = AArch64::sub_32; break; 1202 case 64: DestSubReg = AArch64::sub_64; break; 1203 default: llvm_unreachable("Unexpected argument promotion"); 1204 } 1205 1206 ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, 1207 VA.getValVT(), ArgValue, 1208 DAG.getTargetConstant(DestSubReg, MVT::i32)), 1209 0); 1210 break; 1211 } 1212 } 1213 1214 InVals.push_back(ArgValue); 1215 } 1216 1217 if (isVarArg) 1218 SaveVarArgRegisters(CCInfo, DAG, dl, Chain); 1219 1220 unsigned StackArgSize = CCInfo.getNextStackOffset(); 1221 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { 1222 // This is a non-standard ABI so by fiat I say we're allowed to make full 1223 // use of the stack area to be popped, which must be aligned to 16 bytes in 1224 // any case: 1225 StackArgSize = RoundUpToAlignment(StackArgSize, 16); 1226 1227 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding 1228 // a multiple of 16. 1229 FuncInfo->setArgumentStackToRestore(StackArgSize); 1230 1231 // This realignment carries over to the available bytes below. Our own 1232 // callers will guarantee the space is free by giving an aligned value to 1233 // CALLSEQ_START. 1234 } 1235 // Even if we're not expected to free up the space, it's useful to know how 1236 // much is there while considering tail calls (because we can reuse it). 1237 FuncInfo->setBytesInStackArgArea(StackArgSize); 1238 1239 return Chain; 1240} 1241 1242SDValue 1243AArch64TargetLowering::LowerReturn(SDValue Chain, 1244 CallingConv::ID CallConv, bool isVarArg, 1245 const SmallVectorImpl<ISD::OutputArg> &Outs, 1246 const SmallVectorImpl<SDValue> &OutVals, 1247 SDLoc dl, SelectionDAG &DAG) const { 1248 // CCValAssign - represent the assignment of the return value to a location. 1249 SmallVector<CCValAssign, 16> RVLocs; 1250 1251 // CCState - Info about the registers and stack slots. 1252 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1253 getTargetMachine(), RVLocs, *DAG.getContext()); 1254 1255 // Analyze outgoing return values. 1256 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv)); 1257 1258 SDValue Flag; 1259 SmallVector<SDValue, 4> RetOps(1, Chain); 1260 1261 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 1262 // PCS: "If the type, T, of the result of a function is such that 1263 // void func(T arg) would require that arg be passed as a value in a 1264 // register (or set of registers) according to the rules in 5.4, then the 1265 // result is returned in the same registers as would be used for such an 1266 // argument. 1267 // 1268 // Otherwise, the caller shall reserve a block of memory of sufficient 1269 // size and alignment to hold the result. The address of the memory block 1270 // shall be passed as an additional argument to the function in x8." 1271 // 1272 // This is implemented in two places. The register-return values are dealt 1273 // with here, more complex returns are passed as an sret parameter, which 1274 // means we don't have to worry about it during actual return. 1275 CCValAssign &VA = RVLocs[i]; 1276 assert(VA.isRegLoc() && "Only register-returns should be created by PCS"); 1277 1278 1279 SDValue Arg = OutVals[i]; 1280 1281 // There's no convenient note in the ABI about this as there is for normal 1282 // arguments, but it says return values are passed in the same registers as 1283 // an argument would be. I believe that includes the comments about 1284 // unspecified higher bits, putting the burden of widening on the *caller* 1285 // for return values. 1286 switch (VA.getLocInfo()) { 1287 default: llvm_unreachable("Unknown loc info"); 1288 case CCValAssign::Full: break; 1289 case CCValAssign::SExt: 1290 case CCValAssign::ZExt: 1291 case CCValAssign::AExt: 1292 // Floating-point values should only be extended when they're going into 1293 // memory, which can't happen here so an integer extend is acceptable. 1294 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1295 break; 1296 case CCValAssign::BCvt: 1297 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1298 break; 1299 } 1300 1301 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 1302 Flag = Chain.getValue(1); 1303 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1304 } 1305 1306 RetOps[0] = Chain; // Update chain. 1307 1308 // Add the flag if we have it. 1309 if (Flag.getNode()) 1310 RetOps.push_back(Flag); 1311 1312 return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other, 1313 &RetOps[0], RetOps.size()); 1314} 1315 1316SDValue 1317AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, 1318 SmallVectorImpl<SDValue> &InVals) const { 1319 SelectionDAG &DAG = CLI.DAG; 1320 SDLoc &dl = CLI.DL; 1321 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 1322 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 1323 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 1324 SDValue Chain = CLI.Chain; 1325 SDValue Callee = CLI.Callee; 1326 bool &IsTailCall = CLI.IsTailCall; 1327 CallingConv::ID CallConv = CLI.CallConv; 1328 bool IsVarArg = CLI.IsVarArg; 1329 1330 MachineFunction &MF = DAG.getMachineFunction(); 1331 AArch64MachineFunctionInfo *FuncInfo 1332 = MF.getInfo<AArch64MachineFunctionInfo>(); 1333 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 1334 bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet(); 1335 bool IsSibCall = false; 1336 1337 if (IsTailCall) { 1338 IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1339 IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1340 Outs, OutVals, Ins, DAG); 1341 1342 // A sibling call is one where we're under the usual C ABI and not planning 1343 // to change that but can still do a tail call: 1344 if (!TailCallOpt && IsTailCall) 1345 IsSibCall = true; 1346 } 1347 1348 SmallVector<CCValAssign, 16> ArgLocs; 1349 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 1350 getTargetMachine(), ArgLocs, *DAG.getContext()); 1351 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv)); 1352 1353 // On AArch64 (and all other architectures I'm aware of) the most this has to 1354 // do is adjust the stack pointer. 1355 unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16); 1356 if (IsSibCall) { 1357 // Since we're not changing the ABI to make this a tail call, the memory 1358 // operands are already available in the caller's incoming argument space. 1359 NumBytes = 0; 1360 } 1361 1362 // FPDiff is the byte offset of the call's argument area from the callee's. 1363 // Stores to callee stack arguments will be placed in FixedStackSlots offset 1364 // by this amount for a tail call. In a sibling call it must be 0 because the 1365 // caller will deallocate the entire stack and the callee still expects its 1366 // arguments to begin at SP+0. Completely unused for non-tail calls. 1367 int FPDiff = 0; 1368 1369 if (IsTailCall && !IsSibCall) { 1370 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); 1371 1372 // FPDiff will be negative if this tail call requires more space than we 1373 // would automatically have in our incoming argument space. Positive if we 1374 // can actually shrink the stack. 1375 FPDiff = NumReusableBytes - NumBytes; 1376 1377 // The stack pointer must be 16-byte aligned at all times it's used for a 1378 // memory operation, which in practice means at *all* times and in 1379 // particular across call boundaries. Therefore our own arguments started at 1380 // a 16-byte aligned SP and the delta applied for the tail call should 1381 // satisfy the same constraint. 1382 assert(FPDiff % 16 == 0 && "unaligned stack on tail call"); 1383 } 1384 1385 if (!IsSibCall) 1386 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 1387 dl); 1388 1389 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP, 1390 getPointerTy()); 1391 1392 SmallVector<SDValue, 8> MemOpChains; 1393 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 1394 1395 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1396 CCValAssign &VA = ArgLocs[i]; 1397 ISD::ArgFlagsTy Flags = Outs[i].Flags; 1398 SDValue Arg = OutVals[i]; 1399 1400 // Callee does the actual widening, so all extensions just use an implicit 1401 // definition of the rest of the Loc. Aesthetically, this would be nicer as 1402 // an ANY_EXTEND, but that isn't valid for floating-point types and this 1403 // alternative works on integer types too. 1404 switch (VA.getLocInfo()) { 1405 default: llvm_unreachable("Unknown loc info!"); 1406 case CCValAssign::Full: break; 1407 case CCValAssign::SExt: 1408 case CCValAssign::ZExt: 1409 case CCValAssign::AExt: { 1410 unsigned SrcSize = VA.getValVT().getSizeInBits(); 1411 unsigned SrcSubReg; 1412 1413 switch (SrcSize) { 1414 case 8: SrcSubReg = AArch64::sub_8; break; 1415 case 16: SrcSubReg = AArch64::sub_16; break; 1416 case 32: SrcSubReg = AArch64::sub_32; break; 1417 case 64: SrcSubReg = AArch64::sub_64; break; 1418 default: llvm_unreachable("Unexpected argument promotion"); 1419 } 1420 1421 Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 1422 VA.getLocVT(), 1423 DAG.getUNDEF(VA.getLocVT()), 1424 Arg, 1425 DAG.getTargetConstant(SrcSubReg, MVT::i32)), 1426 0); 1427 1428 break; 1429 } 1430 case CCValAssign::BCvt: 1431 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1432 break; 1433 } 1434 1435 if (VA.isRegLoc()) { 1436 // A normal register (sub-) argument. For now we just note it down because 1437 // we want to copy things into registers as late as possible to avoid 1438 // register-pressure (and possibly worse). 1439 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1440 continue; 1441 } 1442 1443 assert(VA.isMemLoc() && "unexpected argument location"); 1444 1445 SDValue DstAddr; 1446 MachinePointerInfo DstInfo; 1447 if (IsTailCall) { 1448 uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() : 1449 VA.getLocVT().getSizeInBits(); 1450 OpSize = (OpSize + 7) / 8; 1451 int32_t Offset = VA.getLocMemOffset() + FPDiff; 1452 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 1453 1454 DstAddr = DAG.getFrameIndex(FI, getPointerTy()); 1455 DstInfo = MachinePointerInfo::getFixedStack(FI); 1456 1457 // Make sure any stack arguments overlapping with where we're storing are 1458 // loaded before this eventual operation. Otherwise they'll be clobbered. 1459 Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI); 1460 } else { 1461 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()); 1462 1463 DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1464 DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset()); 1465 } 1466 1467 if (Flags.isByVal()) { 1468 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64); 1469 SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode, 1470 Flags.getByValAlign(), 1471 /*isVolatile = */ false, 1472 /*alwaysInline = */ false, 1473 DstInfo, MachinePointerInfo(0)); 1474 MemOpChains.push_back(Cpy); 1475 } else { 1476 // Normal stack argument, put it where it's needed. 1477 SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo, 1478 false, false, 0); 1479 MemOpChains.push_back(Store); 1480 } 1481 } 1482 1483 // The loads and stores generated above shouldn't clash with each 1484 // other. Combining them with this TokenFactor notes that fact for the rest of 1485 // the backend. 1486 if (!MemOpChains.empty()) 1487 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1488 &MemOpChains[0], MemOpChains.size()); 1489 1490 // Most of the rest of the instructions need to be glued together; we don't 1491 // want assignments to actual registers used by a call to be rearranged by a 1492 // well-meaning scheduler. 1493 SDValue InFlag; 1494 1495 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1496 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1497 RegsToPass[i].second, InFlag); 1498 InFlag = Chain.getValue(1); 1499 } 1500 1501 // The linker is responsible for inserting veneers when necessary to put a 1502 // function call destination in range, so we don't need to bother with a 1503 // wrapper here. 1504 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1505 const GlobalValue *GV = G->getGlobal(); 1506 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy()); 1507 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1508 const char *Sym = S->getSymbol(); 1509 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy()); 1510 } 1511 1512 // We don't usually want to end the call-sequence here because we would tidy 1513 // the frame up *after* the call, however in the ABI-changing tail-call case 1514 // we've carefully laid out the parameters so that when sp is reset they'll be 1515 // in the correct location. 1516 if (IsTailCall && !IsSibCall) { 1517 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1518 DAG.getIntPtrConstant(0, true), InFlag, dl); 1519 InFlag = Chain.getValue(1); 1520 } 1521 1522 // We produce the following DAG scheme for the actual call instruction: 1523 // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag? 1524 // 1525 // Most arguments aren't going to be used and just keep the values live as 1526 // far as LLVM is concerned. It's expected to be selected as simply "bl 1527 // callee" (for a direct, non-tail call). 1528 std::vector<SDValue> Ops; 1529 Ops.push_back(Chain); 1530 Ops.push_back(Callee); 1531 1532 if (IsTailCall) { 1533 // Each tail call may have to adjust the stack by a different amount, so 1534 // this information must travel along with the operation for eventual 1535 // consumption by emitEpilogue. 1536 Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32)); 1537 } 1538 1539 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1540 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1541 RegsToPass[i].second.getValueType())); 1542 1543 1544 // Add a register mask operand representing the call-preserved registers. This 1545 // is used later in codegen to constrain register-allocation. 1546 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 1547 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 1548 assert(Mask && "Missing call preserved mask for calling convention"); 1549 Ops.push_back(DAG.getRegisterMask(Mask)); 1550 1551 // If we needed glue, put it in as the last argument. 1552 if (InFlag.getNode()) 1553 Ops.push_back(InFlag); 1554 1555 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1556 1557 if (IsTailCall) { 1558 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1559 } 1560 1561 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size()); 1562 InFlag = Chain.getValue(1); 1563 1564 // Now we can reclaim the stack, just as well do it before working out where 1565 // our return value is. 1566 if (!IsSibCall) { 1567 uint64_t CalleePopBytes 1568 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0; 1569 1570 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1571 DAG.getIntPtrConstant(CalleePopBytes, true), 1572 InFlag, dl); 1573 InFlag = Chain.getValue(1); 1574 } 1575 1576 return LowerCallResult(Chain, InFlag, CallConv, 1577 IsVarArg, Ins, dl, DAG, InVals); 1578} 1579 1580SDValue 1581AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1582 CallingConv::ID CallConv, bool IsVarArg, 1583 const SmallVectorImpl<ISD::InputArg> &Ins, 1584 SDLoc dl, SelectionDAG &DAG, 1585 SmallVectorImpl<SDValue> &InVals) const { 1586 // Assign locations to each value returned by this call. 1587 SmallVector<CCValAssign, 16> RVLocs; 1588 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 1589 getTargetMachine(), RVLocs, *DAG.getContext()); 1590 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv)); 1591 1592 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1593 CCValAssign VA = RVLocs[i]; 1594 1595 // Return values that are too big to fit into registers should use an sret 1596 // pointer, so this can be a lot simpler than the main argument code. 1597 assert(VA.isRegLoc() && "Memory locations not expected for call return"); 1598 1599 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1600 InFlag); 1601 Chain = Val.getValue(1); 1602 InFlag = Val.getValue(2); 1603 1604 switch (VA.getLocInfo()) { 1605 default: llvm_unreachable("Unknown loc info!"); 1606 case CCValAssign::Full: break; 1607 case CCValAssign::BCvt: 1608 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1609 break; 1610 case CCValAssign::ZExt: 1611 case CCValAssign::SExt: 1612 case CCValAssign::AExt: 1613 // Floating-point arguments only get extended/truncated if they're going 1614 // in memory, so using the integer operation is acceptable here. 1615 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 1616 break; 1617 } 1618 1619 InVals.push_back(Val); 1620 } 1621 1622 return Chain; 1623} 1624 1625bool 1626AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1627 CallingConv::ID CalleeCC, 1628 bool IsVarArg, 1629 bool IsCalleeStructRet, 1630 bool IsCallerStructRet, 1631 const SmallVectorImpl<ISD::OutputArg> &Outs, 1632 const SmallVectorImpl<SDValue> &OutVals, 1633 const SmallVectorImpl<ISD::InputArg> &Ins, 1634 SelectionDAG& DAG) const { 1635 1636 // For CallingConv::C this function knows whether the ABI needs 1637 // changing. That's not true for other conventions so they will have to opt in 1638 // manually. 1639 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1640 return false; 1641 1642 const MachineFunction &MF = DAG.getMachineFunction(); 1643 const Function *CallerF = MF.getFunction(); 1644 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1645 bool CCMatch = CallerCC == CalleeCC; 1646 1647 // Byval parameters hand the function a pointer directly into the stack area 1648 // we want to reuse during a tail call. Working around this *is* possible (see 1649 // X86) but less efficient and uglier in LowerCall. 1650 for (Function::const_arg_iterator i = CallerF->arg_begin(), 1651 e = CallerF->arg_end(); i != e; ++i) 1652 if (i->hasByValAttr()) 1653 return false; 1654 1655 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 1656 if (IsTailCallConvention(CalleeCC) && CCMatch) 1657 return true; 1658 return false; 1659 } 1660 1661 // Now we search for cases where we can use a tail call without changing the 1662 // ABI. Sibcall is used in some places (particularly gcc) to refer to this 1663 // concept. 1664 1665 // I want anyone implementing a new calling convention to think long and hard 1666 // about this assert. 1667 assert((!IsVarArg || CalleeCC == CallingConv::C) 1668 && "Unexpected variadic calling convention"); 1669 1670 if (IsVarArg && !Outs.empty()) { 1671 // At least two cases here: if caller is fastcc then we can't have any 1672 // memory arguments (we'd be expected to clean up the stack afterwards). If 1673 // caller is C then we could potentially use its argument area. 1674 1675 // FIXME: for now we take the most conservative of these in both cases: 1676 // disallow all variadic memory operands. 1677 SmallVector<CCValAssign, 16> ArgLocs; 1678 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1679 getTargetMachine(), ArgLocs, *DAG.getContext()); 1680 1681 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1682 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 1683 if (!ArgLocs[i].isRegLoc()) 1684 return false; 1685 } 1686 1687 // If the calling conventions do not match, then we'd better make sure the 1688 // results are returned in the same way as what the caller expects. 1689 if (!CCMatch) { 1690 SmallVector<CCValAssign, 16> RVLocs1; 1691 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1692 getTargetMachine(), RVLocs1, *DAG.getContext()); 1693 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC)); 1694 1695 SmallVector<CCValAssign, 16> RVLocs2; 1696 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1697 getTargetMachine(), RVLocs2, *DAG.getContext()); 1698 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC)); 1699 1700 if (RVLocs1.size() != RVLocs2.size()) 1701 return false; 1702 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1703 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1704 return false; 1705 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1706 return false; 1707 if (RVLocs1[i].isRegLoc()) { 1708 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1709 return false; 1710 } else { 1711 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1712 return false; 1713 } 1714 } 1715 } 1716 1717 // Nothing more to check if the callee is taking no arguments 1718 if (Outs.empty()) 1719 return true; 1720 1721 SmallVector<CCValAssign, 16> ArgLocs; 1722 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1723 getTargetMachine(), ArgLocs, *DAG.getContext()); 1724 1725 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1726 1727 const AArch64MachineFunctionInfo *FuncInfo 1728 = MF.getInfo<AArch64MachineFunctionInfo>(); 1729 1730 // If the stack arguments for this call would fit into our own save area then 1731 // the call can be made tail. 1732 return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea(); 1733} 1734 1735bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, 1736 bool TailCallOpt) const { 1737 return CallCC == CallingConv::Fast && TailCallOpt; 1738} 1739 1740bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const { 1741 return CallCC == CallingConv::Fast; 1742} 1743 1744SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, 1745 SelectionDAG &DAG, 1746 MachineFrameInfo *MFI, 1747 int ClobberedFI) const { 1748 SmallVector<SDValue, 8> ArgChains; 1749 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI); 1750 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1; 1751 1752 // Include the original chain at the beginning of the list. When this is 1753 // used by target LowerCall hooks, this helps legalize find the 1754 // CALLSEQ_BEGIN node. 1755 ArgChains.push_back(Chain); 1756 1757 // Add a chain value for each stack argument corresponding 1758 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(), 1759 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U) 1760 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 1761 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 1762 if (FI->getIndex() < 0) { 1763 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex()); 1764 int64_t InLastByte = InFirstByte; 1765 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1; 1766 1767 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) || 1768 (FirstByte <= InFirstByte && InFirstByte <= LastByte)) 1769 ArgChains.push_back(SDValue(L, 1)); 1770 } 1771 1772 // Build a tokenfactor for all the chains. 1773 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, 1774 &ArgChains[0], ArgChains.size()); 1775} 1776 1777static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) { 1778 switch (CC) { 1779 case ISD::SETEQ: return A64CC::EQ; 1780 case ISD::SETGT: return A64CC::GT; 1781 case ISD::SETGE: return A64CC::GE; 1782 case ISD::SETLT: return A64CC::LT; 1783 case ISD::SETLE: return A64CC::LE; 1784 case ISD::SETNE: return A64CC::NE; 1785 case ISD::SETUGT: return A64CC::HI; 1786 case ISD::SETUGE: return A64CC::HS; 1787 case ISD::SETULT: return A64CC::LO; 1788 case ISD::SETULE: return A64CC::LS; 1789 default: llvm_unreachable("Unexpected condition code"); 1790 } 1791} 1792 1793bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const { 1794 // icmp is implemented using adds/subs immediate, which take an unsigned 1795 // 12-bit immediate, optionally shifted left by 12 bits. 1796 1797 // Symmetric by using adds/subs 1798 if (Val < 0) 1799 Val = -Val; 1800 1801 return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0; 1802} 1803 1804SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS, 1805 ISD::CondCode CC, SDValue &A64cc, 1806 SelectionDAG &DAG, SDLoc &dl) const { 1807 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 1808 int64_t C = 0; 1809 EVT VT = RHSC->getValueType(0); 1810 bool knownInvalid = false; 1811 1812 // I'm not convinced the rest of LLVM handles these edge cases properly, but 1813 // we can at least get it right. 1814 if (isSignedIntSetCC(CC)) { 1815 C = RHSC->getSExtValue(); 1816 } else if (RHSC->getZExtValue() > INT64_MAX) { 1817 // A 64-bit constant not representable by a signed 64-bit integer is far 1818 // too big to fit into a SUBS immediate anyway. 1819 knownInvalid = true; 1820 } else { 1821 C = RHSC->getZExtValue(); 1822 } 1823 1824 if (!knownInvalid && !isLegalICmpImmediate(C)) { 1825 // Constant does not fit, try adjusting it by one? 1826 switch (CC) { 1827 default: break; 1828 case ISD::SETLT: 1829 case ISD::SETGE: 1830 if (isLegalICmpImmediate(C-1)) { 1831 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 1832 RHS = DAG.getConstant(C-1, VT); 1833 } 1834 break; 1835 case ISD::SETULT: 1836 case ISD::SETUGE: 1837 if (isLegalICmpImmediate(C-1)) { 1838 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 1839 RHS = DAG.getConstant(C-1, VT); 1840 } 1841 break; 1842 case ISD::SETLE: 1843 case ISD::SETGT: 1844 if (isLegalICmpImmediate(C+1)) { 1845 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 1846 RHS = DAG.getConstant(C+1, VT); 1847 } 1848 break; 1849 case ISD::SETULE: 1850 case ISD::SETUGT: 1851 if (isLegalICmpImmediate(C+1)) { 1852 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 1853 RHS = DAG.getConstant(C+1, VT); 1854 } 1855 break; 1856 } 1857 } 1858 } 1859 1860 A64CC::CondCodes CondCode = IntCCToA64CC(CC); 1861 A64cc = DAG.getConstant(CondCode, MVT::i32); 1862 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1863 DAG.getCondCode(CC)); 1864} 1865 1866static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, 1867 A64CC::CondCodes &Alternative) { 1868 A64CC::CondCodes CondCode = A64CC::Invalid; 1869 Alternative = A64CC::Invalid; 1870 1871 switch (CC) { 1872 default: llvm_unreachable("Unknown FP condition!"); 1873 case ISD::SETEQ: 1874 case ISD::SETOEQ: CondCode = A64CC::EQ; break; 1875 case ISD::SETGT: 1876 case ISD::SETOGT: CondCode = A64CC::GT; break; 1877 case ISD::SETGE: 1878 case ISD::SETOGE: CondCode = A64CC::GE; break; 1879 case ISD::SETOLT: CondCode = A64CC::MI; break; 1880 case ISD::SETOLE: CondCode = A64CC::LS; break; 1881 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break; 1882 case ISD::SETO: CondCode = A64CC::VC; break; 1883 case ISD::SETUO: CondCode = A64CC::VS; break; 1884 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break; 1885 case ISD::SETUGT: CondCode = A64CC::HI; break; 1886 case ISD::SETUGE: CondCode = A64CC::PL; break; 1887 case ISD::SETLT: 1888 case ISD::SETULT: CondCode = A64CC::LT; break; 1889 case ISD::SETLE: 1890 case ISD::SETULE: CondCode = A64CC::LE; break; 1891 case ISD::SETNE: 1892 case ISD::SETUNE: CondCode = A64CC::NE; break; 1893 } 1894 return CondCode; 1895} 1896 1897SDValue 1898AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 1899 SDLoc DL(Op); 1900 EVT PtrVT = getPointerTy(); 1901 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1902 1903 switch(getTargetMachine().getCodeModel()) { 1904 case CodeModel::Small: 1905 // The most efficient code is PC-relative anyway for the small memory model, 1906 // so we don't need to worry about relocation model. 1907 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 1908 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1909 AArch64II::MO_NO_FLAG), 1910 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1911 AArch64II::MO_LO12), 1912 DAG.getConstant(/*Alignment=*/ 4, MVT::i32)); 1913 case CodeModel::Large: 1914 return DAG.getNode( 1915 AArch64ISD::WrapperLarge, DL, PtrVT, 1916 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3), 1917 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 1918 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 1919 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 1920 default: 1921 llvm_unreachable("Only small and large code models supported now"); 1922 } 1923} 1924 1925 1926// (BRCOND chain, val, dest) 1927SDValue 1928AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 1929 SDLoc dl(Op); 1930 SDValue Chain = Op.getOperand(0); 1931 SDValue TheBit = Op.getOperand(1); 1932 SDValue DestBB = Op.getOperand(2); 1933 1934 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 1935 // that as the consumer we are responsible for ignoring rubbish in higher 1936 // bits. 1937 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 1938 DAG.getConstant(1, MVT::i32)); 1939 1940 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 1941 DAG.getConstant(0, TheBit.getValueType()), 1942 DAG.getCondCode(ISD::SETNE)); 1943 1944 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain, 1945 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32), 1946 DestBB); 1947} 1948 1949// (BR_CC chain, condcode, lhs, rhs, dest) 1950SDValue 1951AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1952 SDLoc dl(Op); 1953 SDValue Chain = Op.getOperand(0); 1954 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1955 SDValue LHS = Op.getOperand(2); 1956 SDValue RHS = Op.getOperand(3); 1957 SDValue DestBB = Op.getOperand(4); 1958 1959 if (LHS.getValueType() == MVT::f128) { 1960 // f128 comparisons are lowered to runtime calls by a routine which sets 1961 // LHS, RHS and CC appropriately for the rest of this function to continue. 1962 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 1963 1964 // If softenSetCCOperands returned a scalar, we need to compare the result 1965 // against zero to select between true and false values. 1966 if (RHS.getNode() == 0) { 1967 RHS = DAG.getConstant(0, LHS.getValueType()); 1968 CC = ISD::SETNE; 1969 } 1970 } 1971 1972 if (LHS.getValueType().isInteger()) { 1973 SDValue A64cc; 1974 1975 // Integers are handled in a separate function because the combinations of 1976 // immediates and tests can get hairy and we may want to fiddle things. 1977 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 1978 1979 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1980 Chain, CmpOp, A64cc, DestBB); 1981 } 1982 1983 // Note that some LLVM floating-point CondCodes can't be lowered to a single 1984 // conditional branch, hence FPCCToA64CC can set a second test, where either 1985 // passing is sufficient. 1986 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 1987 CondCode = FPCCToA64CC(CC, Alternative); 1988 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 1989 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1990 DAG.getCondCode(CC)); 1991 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1992 Chain, SetCC, A64cc, DestBB); 1993 1994 if (Alternative != A64CC::Invalid) { 1995 A64cc = DAG.getConstant(Alternative, MVT::i32); 1996 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1997 A64BR_CC, SetCC, A64cc, DestBB); 1998 1999 } 2000 2001 return A64BR_CC; 2002} 2003 2004SDValue 2005AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG, 2006 RTLIB::Libcall Call) const { 2007 ArgListTy Args; 2008 ArgListEntry Entry; 2009 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) { 2010 EVT ArgVT = Op.getOperand(i).getValueType(); 2011 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2012 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy; 2013 Entry.isSExt = false; 2014 Entry.isZExt = false; 2015 Args.push_back(Entry); 2016 } 2017 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy()); 2018 2019 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 2020 2021 // By default, the input chain to this libcall is the entry node of the 2022 // function. If the libcall is going to be emitted as a tail call then 2023 // isUsedByReturnOnly will change it to the right chain if the return 2024 // node which is being folded has a non-entry input chain. 2025 SDValue InChain = DAG.getEntryNode(); 2026 2027 // isTailCall may be true since the callee does not reference caller stack 2028 // frame. Check if it's in the right position. 2029 SDValue TCChain = InChain; 2030 bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain); 2031 if (isTailCall) 2032 InChain = TCChain; 2033 2034 TargetLowering:: 2035 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false, 2036 0, getLibcallCallingConv(Call), isTailCall, 2037 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2038 Callee, Args, DAG, SDLoc(Op)); 2039 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2040 2041 if (!CallInfo.second.getNode()) 2042 // It's a tailcall, return the chain (which is the DAG root). 2043 return DAG.getRoot(); 2044 2045 return CallInfo.first; 2046} 2047 2048SDValue 2049AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2050 if (Op.getOperand(0).getValueType() != MVT::f128) { 2051 // It's legal except when f128 is involved 2052 return Op; 2053 } 2054 2055 RTLIB::Libcall LC; 2056 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 2057 2058 SDValue SrcVal = Op.getOperand(0); 2059 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, 2060 /*isSigned*/ false, SDLoc(Op)).first; 2061} 2062 2063SDValue 2064AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 2065 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); 2066 2067 RTLIB::Libcall LC; 2068 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 2069 2070 return LowerF128ToCall(Op, DAG, LC); 2071} 2072 2073SDValue 2074AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 2075 bool IsSigned) const { 2076 if (Op.getOperand(0).getValueType() != MVT::f128) { 2077 // It's legal except when f128 is involved 2078 return Op; 2079 } 2080 2081 RTLIB::Libcall LC; 2082 if (IsSigned) 2083 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2084 else 2085 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2086 2087 return LowerF128ToCall(Op, DAG, LC); 2088} 2089 2090SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2091 MachineFunction &MF = DAG.getMachineFunction(); 2092 MachineFrameInfo *MFI = MF.getFrameInfo(); 2093 MFI->setReturnAddressIsTaken(true); 2094 2095 EVT VT = Op.getValueType(); 2096 SDLoc dl(Op); 2097 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2098 if (Depth) { 2099 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2100 SDValue Offset = DAG.getConstant(8, MVT::i64); 2101 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2102 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2103 MachinePointerInfo(), false, false, false, 0); 2104 } 2105 2106 // Return X30, which contains the return address. Mark it an implicit live-in. 2107 unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64)); 2108 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64); 2109} 2110 2111 2112SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) 2113 const { 2114 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2115 MFI->setFrameAddressIsTaken(true); 2116 2117 EVT VT = Op.getValueType(); 2118 SDLoc dl(Op); 2119 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2120 unsigned FrameReg = AArch64::X29; 2121 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2122 while (Depth--) 2123 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2124 MachinePointerInfo(), 2125 false, false, false, 0); 2126 return FrameAddr; 2127} 2128 2129SDValue 2130AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, 2131 SelectionDAG &DAG) const { 2132 assert(getTargetMachine().getCodeModel() == CodeModel::Large); 2133 assert(getTargetMachine().getRelocationModel() == Reloc::Static); 2134 2135 EVT PtrVT = getPointerTy(); 2136 SDLoc dl(Op); 2137 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2138 const GlobalValue *GV = GN->getGlobal(); 2139 2140 SDValue GlobalAddr = DAG.getNode( 2141 AArch64ISD::WrapperLarge, dl, PtrVT, 2142 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3), 2143 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 2144 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 2145 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 2146 2147 if (GN->getOffset() != 0) 2148 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2149 DAG.getConstant(GN->getOffset(), PtrVT)); 2150 2151 return GlobalAddr; 2152} 2153 2154SDValue 2155AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op, 2156 SelectionDAG &DAG) const { 2157 assert(getTargetMachine().getCodeModel() == CodeModel::Small); 2158 2159 EVT PtrVT = getPointerTy(); 2160 SDLoc dl(Op); 2161 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2162 const GlobalValue *GV = GN->getGlobal(); 2163 unsigned Alignment = GV->getAlignment(); 2164 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2165 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) { 2166 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate 2167 // to zero when they remain undefined. In PIC mode the GOT can take care of 2168 // this, but in absolute mode we use a constant pool load. 2169 SDValue PoolAddr; 2170 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2171 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2172 AArch64II::MO_NO_FLAG), 2173 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2174 AArch64II::MO_LO12), 2175 DAG.getConstant(8, MVT::i32)); 2176 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr, 2177 MachinePointerInfo::getConstantPool(), 2178 /*isVolatile=*/ false, 2179 /*isNonTemporal=*/ true, 2180 /*isInvariant=*/ true, 8); 2181 if (GN->getOffset() != 0) 2182 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2183 DAG.getConstant(GN->getOffset(), PtrVT)); 2184 2185 return GlobalAddr; 2186 } 2187 2188 if (Alignment == 0) { 2189 const PointerType *GVPtrTy = cast<PointerType>(GV->getType()); 2190 if (GVPtrTy->getElementType()->isSized()) { 2191 Alignment 2192 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType()); 2193 } else { 2194 // Be conservative if we can't guess, not that it really matters: 2195 // functions and labels aren't valid for loads, and the methods used to 2196 // actually calculate an address work with any alignment. 2197 Alignment = 1; 2198 } 2199 } 2200 2201 unsigned char HiFixup, LoFixup; 2202 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM); 2203 2204 if (UseGOT) { 2205 HiFixup = AArch64II::MO_GOT; 2206 LoFixup = AArch64II::MO_GOT_LO12; 2207 Alignment = 8; 2208 } else { 2209 HiFixup = AArch64II::MO_NO_FLAG; 2210 LoFixup = AArch64II::MO_LO12; 2211 } 2212 2213 // AArch64's small model demands the following sequence: 2214 // ADRP x0, somewhere 2215 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly). 2216 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2217 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2218 HiFixup), 2219 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2220 LoFixup), 2221 DAG.getConstant(Alignment, MVT::i32)); 2222 2223 if (UseGOT) { 2224 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(), 2225 GlobalRef); 2226 } 2227 2228 if (GN->getOffset() != 0) 2229 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef, 2230 DAG.getConstant(GN->getOffset(), PtrVT)); 2231 2232 return GlobalRef; 2233} 2234 2235SDValue 2236AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, 2237 SelectionDAG &DAG) const { 2238 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so 2239 // we make those distinctions here. 2240 2241 switch (getTargetMachine().getCodeModel()) { 2242 case CodeModel::Small: 2243 return LowerGlobalAddressELFSmall(Op, DAG); 2244 case CodeModel::Large: 2245 return LowerGlobalAddressELFLarge(Op, DAG); 2246 default: 2247 llvm_unreachable("Only small and large code models supported now"); 2248 } 2249} 2250 2251SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, 2252 SDValue DescAddr, 2253 SDLoc DL, 2254 SelectionDAG &DAG) const { 2255 EVT PtrVT = getPointerTy(); 2256 2257 // The function we need to call is simply the first entry in the GOT for this 2258 // descriptor, load it in preparation. 2259 SDValue Func, Chain; 2260 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2261 DescAddr); 2262 2263 // The function takes only one argument: the address of the descriptor itself 2264 // in X0. 2265 SDValue Glue; 2266 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue); 2267 Glue = Chain.getValue(1); 2268 2269 // Finally, there's a special calling-convention which means that the lookup 2270 // must preserve all registers (except X0, obviously). 2271 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2272 const AArch64RegisterInfo *A64RI 2273 = static_cast<const AArch64RegisterInfo *>(TRI); 2274 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask(); 2275 2276 // We're now ready to populate the argument list, as with a normal call: 2277 std::vector<SDValue> Ops; 2278 Ops.push_back(Chain); 2279 Ops.push_back(Func); 2280 Ops.push_back(SymAddr); 2281 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT)); 2282 Ops.push_back(DAG.getRegisterMask(Mask)); 2283 Ops.push_back(Glue); 2284 2285 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2286 Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], 2287 Ops.size()); 2288 Glue = Chain.getValue(1); 2289 2290 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it 2291 // back to the generic handling code. 2292 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); 2293} 2294 2295SDValue 2296AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, 2297 SelectionDAG &DAG) const { 2298 assert(getSubtarget()->isTargetELF() && 2299 "TLS not implemented for non-ELF targets"); 2300 assert(getTargetMachine().getCodeModel() == CodeModel::Small 2301 && "TLS only supported in small memory model"); 2302 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2303 2304 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); 2305 2306 SDValue TPOff; 2307 EVT PtrVT = getPointerTy(); 2308 SDLoc DL(Op); 2309 const GlobalValue *GV = GA->getGlobal(); 2310 2311 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); 2312 2313 if (Model == TLSModel::InitialExec) { 2314 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2315 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2316 AArch64II::MO_GOTTPREL), 2317 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2318 AArch64II::MO_GOTTPREL_LO12), 2319 DAG.getConstant(8, MVT::i32)); 2320 TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2321 TPOff); 2322 } else if (Model == TLSModel::LocalExec) { 2323 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2324 AArch64II::MO_TPREL_G1); 2325 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2326 AArch64II::MO_TPREL_G0_NC); 2327 2328 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2329 DAG.getTargetConstant(1, MVT::i32)), 0); 2330 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2331 TPOff, LoVar, 2332 DAG.getTargetConstant(0, MVT::i32)), 0); 2333 } else if (Model == TLSModel::GeneralDynamic) { 2334 // Accesses used in this sequence go via the TLS descriptor which lives in 2335 // the GOT. Prepare an address we can use to handle this. 2336 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2337 AArch64II::MO_TLSDESC); 2338 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2339 AArch64II::MO_TLSDESC_LO12); 2340 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2341 HiDesc, LoDesc, 2342 DAG.getConstant(8, MVT::i32)); 2343 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0); 2344 2345 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2346 } else if (Model == TLSModel::LocalDynamic) { 2347 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS 2348 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate 2349 // the beginning of the module's TLS region, followed by a DTPREL offset 2350 // calculation. 2351 2352 // These accesses will need deduplicating if there's more than one. 2353 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction() 2354 .getInfo<AArch64MachineFunctionInfo>(); 2355 MFI->incNumLocalDynamicTLSAccesses(); 2356 2357 2358 // Get the location of _TLS_MODULE_BASE_: 2359 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2360 AArch64II::MO_TLSDESC); 2361 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2362 AArch64II::MO_TLSDESC_LO12); 2363 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2364 HiDesc, LoDesc, 2365 DAG.getConstant(8, MVT::i32)); 2366 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); 2367 2368 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2369 2370 // Get the variable's offset from _TLS_MODULE_BASE_ 2371 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2372 AArch64II::MO_DTPREL_G1); 2373 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2374 AArch64II::MO_DTPREL_G0_NC); 2375 2376 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2377 DAG.getTargetConstant(0, MVT::i32)), 0); 2378 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2379 TPOff, LoVar, 2380 DAG.getTargetConstant(0, MVT::i32)), 0); 2381 } else 2382 llvm_unreachable("Unsupported TLS access model"); 2383 2384 2385 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); 2386} 2387 2388SDValue 2389AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2390 bool IsSigned) const { 2391 if (Op.getValueType() != MVT::f128) { 2392 // Legal for everything except f128. 2393 return Op; 2394 } 2395 2396 RTLIB::Libcall LC; 2397 if (IsSigned) 2398 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2399 else 2400 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2401 2402 return LowerF128ToCall(Op, DAG, LC); 2403} 2404 2405 2406SDValue 2407AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2408 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2409 SDLoc dl(JT); 2410 EVT PtrVT = getPointerTy(); 2411 2412 // When compiling PIC, jump tables get put in the code section so a static 2413 // relocation-style is acceptable for both cases. 2414 switch (getTargetMachine().getCodeModel()) { 2415 case CodeModel::Small: 2416 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2417 DAG.getTargetJumpTable(JT->getIndex(), PtrVT), 2418 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2419 AArch64II::MO_LO12), 2420 DAG.getConstant(1, MVT::i32)); 2421 case CodeModel::Large: 2422 return DAG.getNode( 2423 AArch64ISD::WrapperLarge, dl, PtrVT, 2424 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3), 2425 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC), 2426 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC), 2427 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC)); 2428 default: 2429 llvm_unreachable("Only small and large code models supported now"); 2430 } 2431} 2432 2433// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) 2434SDValue 2435AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2436 SDLoc dl(Op); 2437 SDValue LHS = Op.getOperand(0); 2438 SDValue RHS = Op.getOperand(1); 2439 SDValue IfTrue = Op.getOperand(2); 2440 SDValue IfFalse = Op.getOperand(3); 2441 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2442 2443 if (LHS.getValueType() == MVT::f128) { 2444 // f128 comparisons are lowered to libcalls, but slot in nicely here 2445 // afterwards. 2446 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2447 2448 // If softenSetCCOperands returned a scalar, we need to compare the result 2449 // against zero to select between true and false values. 2450 if (RHS.getNode() == 0) { 2451 RHS = DAG.getConstant(0, LHS.getValueType()); 2452 CC = ISD::SETNE; 2453 } 2454 } 2455 2456 if (LHS.getValueType().isInteger()) { 2457 SDValue A64cc; 2458 2459 // Integers are handled in a separate function because the combinations of 2460 // immediates and tests can get hairy and we may want to fiddle things. 2461 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2462 2463 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2464 CmpOp, IfTrue, IfFalse, A64cc); 2465 } 2466 2467 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2468 // conditional branch, hence FPCCToA64CC can set a second test, where either 2469 // passing is sufficient. 2470 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2471 CondCode = FPCCToA64CC(CC, Alternative); 2472 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2473 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2474 DAG.getCondCode(CC)); 2475 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, 2476 Op.getValueType(), 2477 SetCC, IfTrue, IfFalse, A64cc); 2478 2479 if (Alternative != A64CC::Invalid) { 2480 A64cc = DAG.getConstant(Alternative, MVT::i32); 2481 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2482 SetCC, IfTrue, A64SELECT_CC, A64cc); 2483 2484 } 2485 2486 return A64SELECT_CC; 2487} 2488 2489// (SELECT testbit, iftrue, iffalse) 2490SDValue 2491AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2492 SDLoc dl(Op); 2493 SDValue TheBit = Op.getOperand(0); 2494 SDValue IfTrue = Op.getOperand(1); 2495 SDValue IfFalse = Op.getOperand(2); 2496 2497 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 2498 // that as the consumer we are responsible for ignoring rubbish in higher 2499 // bits. 2500 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 2501 DAG.getConstant(1, MVT::i32)); 2502 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 2503 DAG.getConstant(0, TheBit.getValueType()), 2504 DAG.getCondCode(ISD::SETNE)); 2505 2506 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2507 A64CMP, IfTrue, IfFalse, 2508 DAG.getConstant(A64CC::NE, MVT::i32)); 2509} 2510 2511static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { 2512 SDLoc DL(Op); 2513 SDValue LHS = Op.getOperand(0); 2514 SDValue RHS = Op.getOperand(1); 2515 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2516 EVT VT = Op.getValueType(); 2517 bool Invert = false; 2518 SDValue Op0, Op1; 2519 unsigned Opcode; 2520 2521 if (LHS.getValueType().isInteger()) { 2522 2523 // Attempt to use Vector Integer Compare Mask Test instruction. 2524 // TST = icmp ne (and (op0, op1), zero). 2525 if (CC == ISD::SETNE) { 2526 if (((LHS.getOpcode() == ISD::AND) && 2527 ISD::isBuildVectorAllZeros(RHS.getNode())) || 2528 ((RHS.getOpcode() == ISD::AND) && 2529 ISD::isBuildVectorAllZeros(LHS.getNode()))) { 2530 2531 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; 2532 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); 2533 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); 2534 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); 2535 } 2536 } 2537 2538 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). 2539 // Note: Compare against Zero does not support unsigned predicates. 2540 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || 2541 ISD::isBuildVectorAllZeros(LHS.getNode())) && 2542 !isUnsignedIntSetCC(CC)) { 2543 2544 // If LHS is the zero value, swap operands and CondCode. 2545 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2546 CC = getSetCCSwappedOperands(CC); 2547 Op0 = RHS; 2548 } else 2549 Op0 = LHS; 2550 2551 // Ensure valid CondCode for Compare Mask against Zero instruction: 2552 // EQ, GE, GT, LE, LT. 2553 if (ISD::SETNE == CC) { 2554 Invert = true; 2555 CC = ISD::SETEQ; 2556 } 2557 2558 // Using constant type to differentiate integer and FP compares with zero. 2559 Op1 = DAG.getConstant(0, MVT::i32); 2560 Opcode = AArch64ISD::NEON_CMPZ; 2561 2562 } else { 2563 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned). 2564 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT. 2565 bool Swap = false; 2566 switch (CC) { 2567 default: 2568 llvm_unreachable("Illegal integer comparison."); 2569 case ISD::SETEQ: 2570 case ISD::SETGT: 2571 case ISD::SETGE: 2572 case ISD::SETUGT: 2573 case ISD::SETUGE: 2574 break; 2575 case ISD::SETNE: 2576 Invert = true; 2577 CC = ISD::SETEQ; 2578 break; 2579 case ISD::SETULT: 2580 case ISD::SETULE: 2581 case ISD::SETLT: 2582 case ISD::SETLE: 2583 Swap = true; 2584 CC = getSetCCSwappedOperands(CC); 2585 } 2586 2587 if (Swap) 2588 std::swap(LHS, RHS); 2589 2590 Opcode = AArch64ISD::NEON_CMP; 2591 Op0 = LHS; 2592 Op1 = RHS; 2593 } 2594 2595 // Generate Compare Mask instr or Compare Mask against Zero instr. 2596 SDValue NeonCmp = 2597 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2598 2599 if (Invert) 2600 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2601 2602 return NeonCmp; 2603 } 2604 2605 // Now handle Floating Point cases. 2606 // Attempt to use Vector Floating Point Compare Mask against Zero instruction. 2607 if (ISD::isBuildVectorAllZeros(RHS.getNode()) || 2608 ISD::isBuildVectorAllZeros(LHS.getNode())) { 2609 2610 // If LHS is the zero value, swap operands and CondCode. 2611 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2612 CC = getSetCCSwappedOperands(CC); 2613 Op0 = RHS; 2614 } else 2615 Op0 = LHS; 2616 2617 // Using constant type to differentiate integer and FP compares with zero. 2618 Op1 = DAG.getConstantFP(0, MVT::f32); 2619 Opcode = AArch64ISD::NEON_CMPZ; 2620 } else { 2621 // Attempt to use Vector Floating Point Compare Mask instruction. 2622 Op0 = LHS; 2623 Op1 = RHS; 2624 Opcode = AArch64ISD::NEON_CMP; 2625 } 2626 2627 SDValue NeonCmpAlt; 2628 // Some register compares have to be implemented with swapped CC and operands, 2629 // e.g.: OLT implemented as OGT with swapped operands. 2630 bool SwapIfRegArgs = false; 2631 2632 // Ensure valid CondCode for FP Compare Mask against Zero instruction: 2633 // EQ, GE, GT, LE, LT. 2634 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT. 2635 switch (CC) { 2636 default: 2637 llvm_unreachable("Illegal FP comparison"); 2638 case ISD::SETUNE: 2639 case ISD::SETNE: 2640 Invert = true; // Fallthrough 2641 case ISD::SETOEQ: 2642 case ISD::SETEQ: 2643 CC = ISD::SETEQ; 2644 break; 2645 case ISD::SETOLT: 2646 case ISD::SETLT: 2647 CC = ISD::SETLT; 2648 SwapIfRegArgs = true; 2649 break; 2650 case ISD::SETOGT: 2651 case ISD::SETGT: 2652 CC = ISD::SETGT; 2653 break; 2654 case ISD::SETOLE: 2655 case ISD::SETLE: 2656 CC = ISD::SETLE; 2657 SwapIfRegArgs = true; 2658 break; 2659 case ISD::SETOGE: 2660 case ISD::SETGE: 2661 CC = ISD::SETGE; 2662 break; 2663 case ISD::SETUGE: 2664 Invert = true; 2665 CC = ISD::SETLT; 2666 SwapIfRegArgs = true; 2667 break; 2668 case ISD::SETULE: 2669 Invert = true; 2670 CC = ISD::SETGT; 2671 break; 2672 case ISD::SETUGT: 2673 Invert = true; 2674 CC = ISD::SETLE; 2675 SwapIfRegArgs = true; 2676 break; 2677 case ISD::SETULT: 2678 Invert = true; 2679 CC = ISD::SETGE; 2680 break; 2681 case ISD::SETUEQ: 2682 Invert = true; // Fallthrough 2683 case ISD::SETONE: 2684 // Expand this to (OGT |OLT). 2685 NeonCmpAlt = 2686 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT)); 2687 CC = ISD::SETLT; 2688 SwapIfRegArgs = true; 2689 break; 2690 case ISD::SETUO: 2691 Invert = true; // Fallthrough 2692 case ISD::SETO: 2693 // Expand this to (OGE | OLT). 2694 NeonCmpAlt = 2695 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE)); 2696 CC = ISD::SETLT; 2697 SwapIfRegArgs = true; 2698 break; 2699 } 2700 2701 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) { 2702 CC = getSetCCSwappedOperands(CC); 2703 std::swap(Op0, Op1); 2704 } 2705 2706 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr 2707 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2708 2709 if (NeonCmpAlt.getNode()) 2710 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt); 2711 2712 if (Invert) 2713 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2714 2715 return NeonCmp; 2716} 2717 2718// (SETCC lhs, rhs, condcode) 2719SDValue 2720AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2721 SDLoc dl(Op); 2722 SDValue LHS = Op.getOperand(0); 2723 SDValue RHS = Op.getOperand(1); 2724 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2725 EVT VT = Op.getValueType(); 2726 2727 if (VT.isVector()) 2728 return LowerVectorSETCC(Op, DAG); 2729 2730 if (LHS.getValueType() == MVT::f128) { 2731 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS 2732 // for the rest of the function (some i32 or i64 values). 2733 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2734 2735 // If softenSetCCOperands returned a scalar, use it. 2736 if (RHS.getNode() == 0) { 2737 assert(LHS.getValueType() == Op.getValueType() && 2738 "Unexpected setcc expansion!"); 2739 return LHS; 2740 } 2741 } 2742 2743 if (LHS.getValueType().isInteger()) { 2744 SDValue A64cc; 2745 2746 // Integers are handled in a separate function because the combinations of 2747 // immediates and tests can get hairy and we may want to fiddle things. 2748 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2749 2750 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, 2751 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT), 2752 A64cc); 2753 } 2754 2755 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2756 // conditional branch, hence FPCCToA64CC can set a second test, where either 2757 // passing is sufficient. 2758 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2759 CondCode = FPCCToA64CC(CC, Alternative); 2760 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2761 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2762 DAG.getCondCode(CC)); 2763 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, 2764 CmpOp, DAG.getConstant(1, VT), 2765 DAG.getConstant(0, VT), A64cc); 2766 2767 if (Alternative != A64CC::Invalid) { 2768 A64cc = DAG.getConstant(Alternative, MVT::i32); 2769 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, 2770 DAG.getConstant(1, VT), A64SELECT_CC, A64cc); 2771 } 2772 2773 return A64SELECT_CC; 2774} 2775 2776SDValue 2777AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2778 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 2779 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 2780 2781 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes 2782 // rather than just 8. 2783 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), 2784 Op.getOperand(1), Op.getOperand(2), 2785 DAG.getConstant(32, MVT::i32), 8, false, false, 2786 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); 2787} 2788 2789SDValue 2790AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2791 // The layout of the va_list struct is specified in the AArch64 Procedure Call 2792 // Standard, section B.3. 2793 MachineFunction &MF = DAG.getMachineFunction(); 2794 AArch64MachineFunctionInfo *FuncInfo 2795 = MF.getInfo<AArch64MachineFunctionInfo>(); 2796 SDLoc DL(Op); 2797 2798 SDValue Chain = Op.getOperand(0); 2799 SDValue VAList = Op.getOperand(1); 2800 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2801 SmallVector<SDValue, 4> MemOps; 2802 2803 // void *__stack at offset 0 2804 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(), 2805 getPointerTy()); 2806 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, 2807 MachinePointerInfo(SV), false, false, 0)); 2808 2809 // void *__gr_top at offset 8 2810 int GPRSize = FuncInfo->getVariadicGPRSize(); 2811 if (GPRSize > 0) { 2812 SDValue GRTop, GRTopAddr; 2813 2814 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2815 DAG.getConstant(8, getPointerTy())); 2816 2817 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy()); 2818 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop, 2819 DAG.getConstant(GPRSize, getPointerTy())); 2820 2821 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, 2822 MachinePointerInfo(SV, 8), 2823 false, false, 0)); 2824 } 2825 2826 // void *__vr_top at offset 16 2827 int FPRSize = FuncInfo->getVariadicFPRSize(); 2828 if (FPRSize > 0) { 2829 SDValue VRTop, VRTopAddr; 2830 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2831 DAG.getConstant(16, getPointerTy())); 2832 2833 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy()); 2834 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop, 2835 DAG.getConstant(FPRSize, getPointerTy())); 2836 2837 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, 2838 MachinePointerInfo(SV, 16), 2839 false, false, 0)); 2840 } 2841 2842 // int __gr_offs at offset 24 2843 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2844 DAG.getConstant(24, getPointerTy())); 2845 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32), 2846 GROffsAddr, MachinePointerInfo(SV, 24), 2847 false, false, 0)); 2848 2849 // int __vr_offs at offset 28 2850 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2851 DAG.getConstant(28, getPointerTy())); 2852 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32), 2853 VROffsAddr, MachinePointerInfo(SV, 28), 2854 false, false, 0)); 2855 2856 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], 2857 MemOps.size()); 2858} 2859 2860SDValue 2861AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2862 switch (Op.getOpcode()) { 2863 default: llvm_unreachable("Don't know how to custom lower this!"); 2864 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128); 2865 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128); 2866 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128); 2867 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128); 2868 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true); 2869 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false); 2870 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true); 2871 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false); 2872 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 2873 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 2874 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 2875 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 2876 2877 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2878 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 2879 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 2880 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG); 2881 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2882 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2883 case ISD::SELECT: return LowerSELECT(Op, DAG); 2884 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2885 case ISD::SETCC: return LowerSETCC(Op, DAG); 2886 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 2887 case ISD::VASTART: return LowerVASTART(Op, DAG); 2888 case ISD::BUILD_VECTOR: 2889 return LowerBUILD_VECTOR(Op, DAG, getSubtarget()); 2890 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2891 } 2892 2893 return SDValue(); 2894} 2895 2896/// Check if the specified splat value corresponds to a valid vector constant 2897/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If 2898/// so, return the encoded 8-bit immediate and the OpCmode instruction fields 2899/// values. 2900static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 2901 unsigned SplatBitSize, SelectionDAG &DAG, 2902 bool is128Bits, NeonModImmType type, EVT &VT, 2903 unsigned &Imm, unsigned &OpCmode) { 2904 switch (SplatBitSize) { 2905 default: 2906 llvm_unreachable("unexpected size for isNeonModifiedImm"); 2907 case 8: { 2908 if (type != Neon_Mov_Imm) 2909 return false; 2910 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 2911 // Neon movi per byte: Op=0, Cmode=1110. 2912 OpCmode = 0xe; 2913 Imm = SplatBits; 2914 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 2915 break; 2916 } 2917 case 16: { 2918 // Neon move inst per halfword 2919 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 2920 if ((SplatBits & ~0xff) == 0) { 2921 // Value = 0x00nn is 0x00nn LSL 0 2922 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000 2923 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001 2924 // Op=x, Cmode=100y 2925 Imm = SplatBits; 2926 OpCmode = 0x8; 2927 break; 2928 } 2929 if ((SplatBits & ~0xff00) == 0) { 2930 // Value = 0xnn00 is 0x00nn LSL 8 2931 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010 2932 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011 2933 // Op=x, Cmode=101x 2934 Imm = SplatBits >> 8; 2935 OpCmode = 0xa; 2936 break; 2937 } 2938 // can't handle any other 2939 return false; 2940 } 2941 2942 case 32: { 2943 // First the LSL variants (MSL is unusable by some interested instructions). 2944 2945 // Neon move instr per word, shift zeros 2946 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 2947 if ((SplatBits & ~0xff) == 0) { 2948 // Value = 0x000000nn is 0x000000nn LSL 0 2949 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000 2950 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001 2951 // Op=x, Cmode=000x 2952 Imm = SplatBits; 2953 OpCmode = 0; 2954 break; 2955 } 2956 if ((SplatBits & ~0xff00) == 0) { 2957 // Value = 0x0000nn00 is 0x000000nn LSL 8 2958 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010 2959 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011 2960 // Op=x, Cmode=001x 2961 Imm = SplatBits >> 8; 2962 OpCmode = 0x2; 2963 break; 2964 } 2965 if ((SplatBits & ~0xff0000) == 0) { 2966 // Value = 0x00nn0000 is 0x000000nn LSL 16 2967 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100 2968 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101 2969 // Op=x, Cmode=010x 2970 Imm = SplatBits >> 16; 2971 OpCmode = 0x4; 2972 break; 2973 } 2974 if ((SplatBits & ~0xff000000) == 0) { 2975 // Value = 0xnn000000 is 0x000000nn LSL 24 2976 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110 2977 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111 2978 // Op=x, Cmode=011x 2979 Imm = SplatBits >> 24; 2980 OpCmode = 0x6; 2981 break; 2982 } 2983 2984 // Now the MSL immediates. 2985 2986 // Neon move instr per word, shift ones 2987 if ((SplatBits & ~0xffff) == 0 && 2988 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 2989 // Value = 0x0000nnff is 0x000000nn MSL 8 2990 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100 2991 // Op=x, Cmode=1100 2992 Imm = SplatBits >> 8; 2993 OpCmode = 0xc; 2994 break; 2995 } 2996 if ((SplatBits & ~0xffffff) == 0 && 2997 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 2998 // Value = 0x00nnffff is 0x000000nn MSL 16 2999 // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101 3000 // Op=x, Cmode=1101 3001 Imm = SplatBits >> 16; 3002 OpCmode = 0xd; 3003 break; 3004 } 3005 // can't handle any other 3006 return false; 3007 } 3008 3009 case 64: { 3010 if (type != Neon_Mov_Imm) 3011 return false; 3012 // Neon move instr bytemask, where each byte is either 0x00 or 0xff. 3013 // movi Op=1, Cmode=1110. 3014 OpCmode = 0x1e; 3015 uint64_t BitMask = 0xff; 3016 uint64_t Val = 0; 3017 unsigned ImmMask = 1; 3018 Imm = 0; 3019 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 3020 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 3021 Val |= BitMask; 3022 Imm |= ImmMask; 3023 } else if ((SplatBits & BitMask) != 0) { 3024 return false; 3025 } 3026 BitMask <<= 8; 3027 ImmMask <<= 1; 3028 } 3029 SplatBits = Val; 3030 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 3031 break; 3032 } 3033 } 3034 3035 return true; 3036} 3037 3038static SDValue PerformANDCombine(SDNode *N, 3039 TargetLowering::DAGCombinerInfo &DCI) { 3040 3041 SelectionDAG &DAG = DCI.DAG; 3042 SDLoc DL(N); 3043 EVT VT = N->getValueType(0); 3044 3045 // We're looking for an SRA/SHL pair which form an SBFX. 3046 3047 if (VT != MVT::i32 && VT != MVT::i64) 3048 return SDValue(); 3049 3050 if (!isa<ConstantSDNode>(N->getOperand(1))) 3051 return SDValue(); 3052 3053 uint64_t TruncMask = N->getConstantOperandVal(1); 3054 if (!isMask_64(TruncMask)) 3055 return SDValue(); 3056 3057 uint64_t Width = CountPopulation_64(TruncMask); 3058 SDValue Shift = N->getOperand(0); 3059 3060 if (Shift.getOpcode() != ISD::SRL) 3061 return SDValue(); 3062 3063 if (!isa<ConstantSDNode>(Shift->getOperand(1))) 3064 return SDValue(); 3065 uint64_t LSB = Shift->getConstantOperandVal(1); 3066 3067 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) 3068 return SDValue(); 3069 3070 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0), 3071 DAG.getConstant(LSB, MVT::i64), 3072 DAG.getConstant(LSB + Width - 1, MVT::i64)); 3073} 3074 3075/// For a true bitfield insert, the bits getting into that contiguous mask 3076/// should come from the low part of an existing value: they must be formed from 3077/// a compatible SHL operation (unless they're already low). This function 3078/// checks that condition and returns the least-significant bit that's 3079/// intended. If the operation not a field preparation, -1 is returned. 3080static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT, 3081 SDValue &MaskedVal, uint64_t Mask) { 3082 if (!isShiftedMask_64(Mask)) 3083 return -1; 3084 3085 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI 3086 // instruction. BFI will do a left-shift by LSB before applying the mask we've 3087 // spotted, so in general we should pre-emptively "undo" that by making sure 3088 // the incoming bits have had a right-shift applied to them. 3089 // 3090 // This right shift, however, will combine with existing left/right shifts. In 3091 // the simplest case of a completely straight bitfield operation, it will be 3092 // expected to completely cancel out with an existing SHL. More complicated 3093 // cases (e.g. bitfield to bitfield copy) may still need a real shift before 3094 // the BFI. 3095 3096 uint64_t LSB = countTrailingZeros(Mask); 3097 int64_t ShiftRightRequired = LSB; 3098 if (MaskedVal.getOpcode() == ISD::SHL && 3099 isa<ConstantSDNode>(MaskedVal.getOperand(1))) { 3100 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1); 3101 MaskedVal = MaskedVal.getOperand(0); 3102 } else if (MaskedVal.getOpcode() == ISD::SRL && 3103 isa<ConstantSDNode>(MaskedVal.getOperand(1))) { 3104 ShiftRightRequired += MaskedVal.getConstantOperandVal(1); 3105 MaskedVal = MaskedVal.getOperand(0); 3106 } 3107 3108 if (ShiftRightRequired > 0) 3109 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal, 3110 DAG.getConstant(ShiftRightRequired, MVT::i64)); 3111 else if (ShiftRightRequired < 0) { 3112 // We could actually end up with a residual left shift, for example with 3113 // "struc.bitfield = val << 1". 3114 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal, 3115 DAG.getConstant(-ShiftRightRequired, MVT::i64)); 3116 } 3117 3118 return LSB; 3119} 3120 3121/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by 3122/// a mask and an extension. Returns true if a BFI was found and provides 3123/// information on its surroundings. 3124static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask, 3125 bool &Extended) { 3126 Extended = false; 3127 if (N.getOpcode() == ISD::ZERO_EXTEND) { 3128 Extended = true; 3129 N = N.getOperand(0); 3130 } 3131 3132 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) { 3133 Mask = N->getConstantOperandVal(1); 3134 N = N.getOperand(0); 3135 } else { 3136 // Mask is the whole width. 3137 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits()); 3138 } 3139 3140 if (N.getOpcode() == AArch64ISD::BFI) { 3141 BFI = N; 3142 return true; 3143 } 3144 3145 return false; 3146} 3147 3148/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which 3149/// is roughly equivalent to (and (BFI ...), mask). This form is used because it 3150/// can often be further combined with a larger mask. Ultimately, we want mask 3151/// to be 2^32-1 or 2^64-1 so the AND can be skipped. 3152static SDValue tryCombineToBFI(SDNode *N, 3153 TargetLowering::DAGCombinerInfo &DCI, 3154 const AArch64Subtarget *Subtarget) { 3155 SelectionDAG &DAG = DCI.DAG; 3156 SDLoc DL(N); 3157 EVT VT = N->getValueType(0); 3158 3159 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 3160 3161 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or 3162 // abandon the effort. 3163 SDValue LHS = N->getOperand(0); 3164 if (LHS.getOpcode() != ISD::AND) 3165 return SDValue(); 3166 3167 uint64_t LHSMask; 3168 if (isa<ConstantSDNode>(LHS.getOperand(1))) 3169 LHSMask = LHS->getConstantOperandVal(1); 3170 else 3171 return SDValue(); 3172 3173 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask 3174 // is or abandon the effort. 3175 SDValue RHS = N->getOperand(1); 3176 if (RHS.getOpcode() != ISD::AND) 3177 return SDValue(); 3178 3179 uint64_t RHSMask; 3180 if (isa<ConstantSDNode>(RHS.getOperand(1))) 3181 RHSMask = RHS->getConstantOperandVal(1); 3182 else 3183 return SDValue(); 3184 3185 // Can't do anything if the masks are incompatible. 3186 if (LHSMask & RHSMask) 3187 return SDValue(); 3188 3189 // Now we need one of the masks to be a contiguous field. Without loss of 3190 // generality that should be the RHS one. 3191 SDValue Bitfield = LHS.getOperand(0); 3192 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) { 3193 // We know that LHS is a candidate new value, and RHS isn't already a better 3194 // one. 3195 std::swap(LHS, RHS); 3196 std::swap(LHSMask, RHSMask); 3197 } 3198 3199 // We've done our best to put the right operands in the right places, all we 3200 // can do now is check whether a BFI exists. 3201 Bitfield = RHS.getOperand(0); 3202 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask); 3203 if (LSB == -1) 3204 return SDValue(); 3205 3206 uint32_t Width = CountPopulation_64(RHSMask); 3207 assert(Width && "Expected non-zero bitfield width"); 3208 3209 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, 3210 LHS.getOperand(0), Bitfield, 3211 DAG.getConstant(LSB, MVT::i64), 3212 DAG.getConstant(Width, MVT::i64)); 3213 3214 // Mask is trivial 3215 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits()))) 3216 return BFI; 3217 3218 return DAG.getNode(ISD::AND, DL, VT, BFI, 3219 DAG.getConstant(LHSMask | RHSMask, VT)); 3220} 3221 3222/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its 3223/// original input. This is surprisingly common because SROA splits things up 3224/// into i8 chunks, so the originally detected MaskedBFI may actually only act 3225/// on the low (say) byte of a word. This is then orred into the rest of the 3226/// word afterwards. 3227/// 3228/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)). 3229/// 3230/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the 3231/// MaskedBFI. We can also deal with a certain amount of extend/truncate being 3232/// involved. 3233static SDValue tryCombineToLargerBFI(SDNode *N, 3234 TargetLowering::DAGCombinerInfo &DCI, 3235 const AArch64Subtarget *Subtarget) { 3236 SelectionDAG &DAG = DCI.DAG; 3237 SDLoc DL(N); 3238 EVT VT = N->getValueType(0); 3239 3240 // First job is to hunt for a MaskedBFI on either the left or right. Swap 3241 // operands if it's actually on the right. 3242 SDValue BFI; 3243 SDValue PossExtraMask; 3244 uint64_t ExistingMask = 0; 3245 bool Extended = false; 3246 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended)) 3247 PossExtraMask = N->getOperand(1); 3248 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended)) 3249 PossExtraMask = N->getOperand(0); 3250 else 3251 return SDValue(); 3252 3253 // We can only combine a BFI with another compatible mask. 3254 if (PossExtraMask.getOpcode() != ISD::AND || 3255 !isa<ConstantSDNode>(PossExtraMask.getOperand(1))) 3256 return SDValue(); 3257 3258 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1); 3259 3260 // Masks must be compatible. 3261 if (ExtraMask & ExistingMask) 3262 return SDValue(); 3263 3264 SDValue OldBFIVal = BFI.getOperand(0); 3265 SDValue NewBFIVal = BFI.getOperand(1); 3266 if (Extended) { 3267 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be 3268 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments 3269 // need to be made compatible. 3270 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32 3271 && "Invalid types for BFI"); 3272 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal); 3273 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal); 3274 } 3275 3276 // We need the MaskedBFI to be combined with a mask of the *same* value. 3277 if (PossExtraMask.getOperand(0) != OldBFIVal) 3278 return SDValue(); 3279 3280 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, 3281 OldBFIVal, NewBFIVal, 3282 BFI.getOperand(2), BFI.getOperand(3)); 3283 3284 // If the masking is trivial, we don't need to create it. 3285 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits()))) 3286 return BFI; 3287 3288 return DAG.getNode(ISD::AND, DL, VT, BFI, 3289 DAG.getConstant(ExtraMask | ExistingMask, VT)); 3290} 3291 3292/// An EXTR instruction is made up of two shifts, ORed together. This helper 3293/// searches for and classifies those shifts. 3294static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, 3295 bool &FromHi) { 3296 if (N.getOpcode() == ISD::SHL) 3297 FromHi = false; 3298 else if (N.getOpcode() == ISD::SRL) 3299 FromHi = true; 3300 else 3301 return false; 3302 3303 if (!isa<ConstantSDNode>(N.getOperand(1))) 3304 return false; 3305 3306 ShiftAmount = N->getConstantOperandVal(1); 3307 Src = N->getOperand(0); 3308 return true; 3309} 3310 3311/// EXTR instruction extracts a contiguous chunk of bits from two existing 3312/// registers viewed as a high/low pair. This function looks for the pattern: 3313/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an 3314/// EXTR. Can't quite be done in TableGen because the two immediates aren't 3315/// independent. 3316static SDValue tryCombineToEXTR(SDNode *N, 3317 TargetLowering::DAGCombinerInfo &DCI) { 3318 SelectionDAG &DAG = DCI.DAG; 3319 SDLoc DL(N); 3320 EVT VT = N->getValueType(0); 3321 3322 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 3323 3324 if (VT != MVT::i32 && VT != MVT::i64) 3325 return SDValue(); 3326 3327 SDValue LHS; 3328 uint32_t ShiftLHS = 0; 3329 bool LHSFromHi = 0; 3330 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi)) 3331 return SDValue(); 3332 3333 SDValue RHS; 3334 uint32_t ShiftRHS = 0; 3335 bool RHSFromHi = 0; 3336 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi)) 3337 return SDValue(); 3338 3339 // If they're both trying to come from the high part of the register, they're 3340 // not really an EXTR. 3341 if (LHSFromHi == RHSFromHi) 3342 return SDValue(); 3343 3344 if (ShiftLHS + ShiftRHS != VT.getSizeInBits()) 3345 return SDValue(); 3346 3347 if (LHSFromHi) { 3348 std::swap(LHS, RHS); 3349 std::swap(ShiftLHS, ShiftRHS); 3350 } 3351 3352 return DAG.getNode(AArch64ISD::EXTR, DL, VT, 3353 LHS, RHS, 3354 DAG.getConstant(ShiftRHS, MVT::i64)); 3355} 3356 3357/// Target-specific dag combine xforms for ISD::OR 3358static SDValue PerformORCombine(SDNode *N, 3359 TargetLowering::DAGCombinerInfo &DCI, 3360 const AArch64Subtarget *Subtarget) { 3361 3362 SelectionDAG &DAG = DCI.DAG; 3363 SDLoc DL(N); 3364 EVT VT = N->getValueType(0); 3365 3366 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 3367 return SDValue(); 3368 3369 // Attempt to recognise bitfield-insert operations. 3370 SDValue Res = tryCombineToBFI(N, DCI, Subtarget); 3371 if (Res.getNode()) 3372 return Res; 3373 3374 // Attempt to combine an existing MaskedBFI operation into one with a larger 3375 // mask. 3376 Res = tryCombineToLargerBFI(N, DCI, Subtarget); 3377 if (Res.getNode()) 3378 return Res; 3379 3380 Res = tryCombineToEXTR(N, DCI); 3381 if (Res.getNode()) 3382 return Res; 3383 3384 if (!Subtarget->hasNEON()) 3385 return SDValue(); 3386 3387 // Attempt to use vector immediate-form BSL 3388 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 3389 3390 SDValue N0 = N->getOperand(0); 3391 if (N0.getOpcode() != ISD::AND) 3392 return SDValue(); 3393 3394 SDValue N1 = N->getOperand(1); 3395 if (N1.getOpcode() != ISD::AND) 3396 return SDValue(); 3397 3398 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 3399 APInt SplatUndef; 3400 unsigned SplatBitSize; 3401 bool HasAnyUndefs; 3402 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 3403 APInt SplatBits0; 3404 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 3405 HasAnyUndefs) && 3406 !HasAnyUndefs) { 3407 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 3408 APInt SplatBits1; 3409 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 3410 HasAnyUndefs) && 3411 !HasAnyUndefs && SplatBits0 == ~SplatBits1) { 3412 // Canonicalize the vector type to make instruction selection simpler. 3413 EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8; 3414 SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT, 3415 N0->getOperand(1), N0->getOperand(0), 3416 N1->getOperand(0)); 3417 return DAG.getNode(ISD::BITCAST, DL, VT, Result); 3418 } 3419 } 3420 } 3421 3422 return SDValue(); 3423} 3424 3425/// Target-specific dag combine xforms for ISD::SRA 3426static SDValue PerformSRACombine(SDNode *N, 3427 TargetLowering::DAGCombinerInfo &DCI) { 3428 3429 SelectionDAG &DAG = DCI.DAG; 3430 SDLoc DL(N); 3431 EVT VT = N->getValueType(0); 3432 3433 // We're looking for an SRA/SHL pair which form an SBFX. 3434 3435 if (VT != MVT::i32 && VT != MVT::i64) 3436 return SDValue(); 3437 3438 if (!isa<ConstantSDNode>(N->getOperand(1))) 3439 return SDValue(); 3440 3441 uint64_t ExtraSignBits = N->getConstantOperandVal(1); 3442 SDValue Shift = N->getOperand(0); 3443 3444 if (Shift.getOpcode() != ISD::SHL) 3445 return SDValue(); 3446 3447 if (!isa<ConstantSDNode>(Shift->getOperand(1))) 3448 return SDValue(); 3449 3450 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1); 3451 uint64_t Width = VT.getSizeInBits() - ExtraSignBits; 3452 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft; 3453 3454 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) 3455 return SDValue(); 3456 3457 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0), 3458 DAG.getConstant(LSB, MVT::i64), 3459 DAG.getConstant(LSB + Width - 1, MVT::i64)); 3460} 3461 3462/// Check if this is a valid build_vector for the immediate operand of 3463/// a vector shift operation, where all the elements of the build_vector 3464/// must have the same constant integer value. 3465static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 3466 // Ignore bit_converts. 3467 while (Op.getOpcode() == ISD::BITCAST) 3468 Op = Op.getOperand(0); 3469 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3470 APInt SplatBits, SplatUndef; 3471 unsigned SplatBitSize; 3472 bool HasAnyUndefs; 3473 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 3474 HasAnyUndefs, ElementBits) || 3475 SplatBitSize > ElementBits) 3476 return false; 3477 Cnt = SplatBits.getSExtValue(); 3478 return true; 3479} 3480 3481/// Check if this is a valid build_vector for the immediate operand of 3482/// a vector shift left operation. That value must be in the range: 3483/// 0 <= Value < ElementBits 3484static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) { 3485 assert(VT.isVector() && "vector shift count is not a vector type"); 3486 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3487 if (!getVShiftImm(Op, ElementBits, Cnt)) 3488 return false; 3489 return (Cnt >= 0 && Cnt < ElementBits); 3490} 3491 3492/// Check if this is a valid build_vector for the immediate operand of a 3493/// vector shift right operation. The value must be in the range: 3494/// 1 <= Value <= ElementBits 3495static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) { 3496 assert(VT.isVector() && "vector shift count is not a vector type"); 3497 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 3498 if (!getVShiftImm(Op, ElementBits, Cnt)) 3499 return false; 3500 return (Cnt >= 1 && Cnt <= ElementBits); 3501} 3502 3503/// Checks for immediate versions of vector shifts and lowers them. 3504static SDValue PerformShiftCombine(SDNode *N, 3505 TargetLowering::DAGCombinerInfo &DCI, 3506 const AArch64Subtarget *ST) { 3507 SelectionDAG &DAG = DCI.DAG; 3508 EVT VT = N->getValueType(0); 3509 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64)) 3510 return PerformSRACombine(N, DCI); 3511 3512 // Nothing to be done for scalar shifts. 3513 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3514 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 3515 return SDValue(); 3516 3517 assert(ST->hasNEON() && "unexpected vector shift"); 3518 int64_t Cnt; 3519 3520 switch (N->getOpcode()) { 3521 default: 3522 llvm_unreachable("unexpected shift opcode"); 3523 3524 case ISD::SHL: 3525 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) { 3526 SDValue RHS = 3527 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, 3528 DAG.getConstant(Cnt, MVT::i32)); 3529 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS); 3530 } 3531 break; 3532 3533 case ISD::SRA: 3534 case ISD::SRL: 3535 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) { 3536 SDValue RHS = 3537 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, 3538 DAG.getConstant(Cnt, MVT::i32)); 3539 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS); 3540 } 3541 break; 3542 } 3543 3544 return SDValue(); 3545} 3546 3547/// ARM-specific DAG combining for intrinsics. 3548static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 3549 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3550 3551 switch (IntNo) { 3552 default: 3553 // Don't do anything for most intrinsics. 3554 break; 3555 3556 case Intrinsic::arm_neon_vqshifts: 3557 case Intrinsic::arm_neon_vqshiftu: 3558 EVT VT = N->getOperand(1).getValueType(); 3559 int64_t Cnt; 3560 if (!isVShiftLImm(N->getOperand(2), VT, Cnt)) 3561 break; 3562 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts) 3563 ? AArch64ISD::NEON_QSHLs 3564 : AArch64ISD::NEON_QSHLu; 3565 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), 3566 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 3567 } 3568 3569 return SDValue(); 3570} 3571 3572/// Target-specific DAG combine function for NEON load/store intrinsics 3573/// to merge base address updates. 3574static SDValue CombineBaseUpdate(SDNode *N, 3575 TargetLowering::DAGCombinerInfo &DCI) { 3576 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 3577 return SDValue(); 3578 3579 SelectionDAG &DAG = DCI.DAG; 3580 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 3581 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 3582 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 3583 SDValue Addr = N->getOperand(AddrOpIdx); 3584 3585 // Search for a use of the address operand that is an increment. 3586 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 3587 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 3588 SDNode *User = *UI; 3589 if (User->getOpcode() != ISD::ADD || 3590 UI.getUse().getResNo() != Addr.getResNo()) 3591 continue; 3592 3593 // Check that the add is independent of the load/store. Otherwise, folding 3594 // it would create a cycle. 3595 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 3596 continue; 3597 3598 // Find the new opcode for the updating load/store. 3599 bool isLoad = true; 3600 bool isLaneOp = false; 3601 unsigned NewOpc = 0; 3602 unsigned NumVecs = 0; 3603 if (isIntrinsic) { 3604 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 3605 switch (IntNo) { 3606 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 3607 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD; 3608 NumVecs = 1; break; 3609 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD; 3610 NumVecs = 2; break; 3611 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD; 3612 NumVecs = 3; break; 3613 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD; 3614 NumVecs = 4; break; 3615 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD; 3616 NumVecs = 1; isLoad = false; break; 3617 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD; 3618 NumVecs = 2; isLoad = false; break; 3619 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD; 3620 NumVecs = 3; isLoad = false; break; 3621 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD; 3622 NumVecs = 4; isLoad = false; break; 3623 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD; 3624 NumVecs = 2; break; 3625 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD; 3626 NumVecs = 3; break; 3627 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD; 3628 NumVecs = 4; break; 3629 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD; 3630 NumVecs = 2; isLoad = false; break; 3631 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD; 3632 NumVecs = 3; isLoad = false; break; 3633 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD; 3634 NumVecs = 4; isLoad = false; break; 3635 case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD; 3636 NumVecs = 2; isLaneOp = true; break; 3637 case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD; 3638 NumVecs = 3; isLaneOp = true; break; 3639 case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD; 3640 NumVecs = 4; isLaneOp = true; break; 3641 case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD; 3642 NumVecs = 2; isLoad = false; isLaneOp = true; break; 3643 case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD; 3644 NumVecs = 3; isLoad = false; isLaneOp = true; break; 3645 case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD; 3646 NumVecs = 4; isLoad = false; isLaneOp = true; break; 3647 } 3648 } else { 3649 isLaneOp = true; 3650 switch (N->getOpcode()) { 3651 default: llvm_unreachable("unexpected opcode for Neon base update"); 3652 case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD; 3653 NumVecs = 2; break; 3654 case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD; 3655 NumVecs = 3; break; 3656 case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD; 3657 NumVecs = 4; break; 3658 } 3659 } 3660 3661 // Find the size of memory referenced by the load/store. 3662 EVT VecTy; 3663 if (isLoad) 3664 VecTy = N->getValueType(0); 3665 else 3666 VecTy = N->getOperand(AddrOpIdx + 1).getValueType(); 3667 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 3668 if (isLaneOp) 3669 NumBytes /= VecTy.getVectorNumElements(); 3670 3671 // If the increment is a constant, it must match the memory ref size. 3672 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 3673 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 3674 uint32_t IncVal = CInc->getZExtValue(); 3675 if (IncVal != NumBytes) 3676 continue; 3677 Inc = DAG.getTargetConstant(IncVal, MVT::i32); 3678 } 3679 3680 // Create the new updating load/store node. 3681 EVT Tys[6]; 3682 unsigned NumResultVecs = (isLoad ? NumVecs : 0); 3683 unsigned n; 3684 for (n = 0; n < NumResultVecs; ++n) 3685 Tys[n] = VecTy; 3686 Tys[n++] = MVT::i64; 3687 Tys[n] = MVT::Other; 3688 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2); 3689 SmallVector<SDValue, 8> Ops; 3690 Ops.push_back(N->getOperand(0)); // incoming chain 3691 Ops.push_back(N->getOperand(AddrOpIdx)); 3692 Ops.push_back(Inc); 3693 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 3694 Ops.push_back(N->getOperand(i)); 3695 } 3696 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 3697 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, 3698 Ops.data(), Ops.size(), 3699 MemInt->getMemoryVT(), 3700 MemInt->getMemOperand()); 3701 3702 // Update the uses. 3703 std::vector<SDValue> NewResults; 3704 for (unsigned i = 0; i < NumResultVecs; ++i) { 3705 NewResults.push_back(SDValue(UpdN.getNode(), i)); 3706 } 3707 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain 3708 DCI.CombineTo(N, NewResults); 3709 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 3710 3711 break; 3712 } 3713 return SDValue(); 3714} 3715 3716/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) 3717/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs. 3718/// If so, combine them to a vldN-dup operation and return true. 3719static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 3720 SelectionDAG &DAG = DCI.DAG; 3721 EVT VT = N->getValueType(0); 3722 3723 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 3724 SDNode *VLD = N->getOperand(0).getNode(); 3725 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 3726 return SDValue(); 3727 unsigned NumVecs = 0; 3728 unsigned NewOpc = 0; 3729 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 3730 if (IntNo == Intrinsic::arm_neon_vld2lane) { 3731 NumVecs = 2; 3732 NewOpc = AArch64ISD::NEON_LD2DUP; 3733 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 3734 NumVecs = 3; 3735 NewOpc = AArch64ISD::NEON_LD3DUP; 3736 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 3737 NumVecs = 4; 3738 NewOpc = AArch64ISD::NEON_LD4DUP; 3739 } else { 3740 return SDValue(); 3741 } 3742 3743 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 3744 // numbers match the load. 3745 unsigned VLDLaneNo = 3746 cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue(); 3747 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 3748 UI != UE; ++UI) { 3749 // Ignore uses of the chain result. 3750 if (UI.getUse().getResNo() == NumVecs) 3751 continue; 3752 SDNode *User = *UI; 3753 if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE || 3754 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 3755 return SDValue(); 3756 } 3757 3758 // Create the vldN-dup node. 3759 EVT Tys[5]; 3760 unsigned n; 3761 for (n = 0; n < NumVecs; ++n) 3762 Tys[n] = VT; 3763 Tys[n] = MVT::Other; 3764 SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1); 3765 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 3766 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 3767 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2, 3768 VLDMemInt->getMemoryVT(), 3769 VLDMemInt->getMemOperand()); 3770 3771 // Update the uses. 3772 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 3773 UI != UE; ++UI) { 3774 unsigned ResNo = UI.getUse().getResNo(); 3775 // Ignore uses of the chain result. 3776 if (ResNo == NumVecs) 3777 continue; 3778 SDNode *User = *UI; 3779 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 3780 } 3781 3782 // Now the vldN-lane intrinsic is dead except for its chain result. 3783 // Update uses of the chain. 3784 std::vector<SDValue> VLDDupResults; 3785 for (unsigned n = 0; n < NumVecs; ++n) 3786 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 3787 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 3788 DCI.CombineTo(VLD, VLDDupResults); 3789 3790 return SDValue(N, 0); 3791} 3792 3793SDValue 3794AArch64TargetLowering::PerformDAGCombine(SDNode *N, 3795 DAGCombinerInfo &DCI) const { 3796 switch (N->getOpcode()) { 3797 default: break; 3798 case ISD::AND: return PerformANDCombine(N, DCI); 3799 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget()); 3800 case ISD::SHL: 3801 case ISD::SRA: 3802 case ISD::SRL: 3803 return PerformShiftCombine(N, DCI, getSubtarget()); 3804 case ISD::INTRINSIC_WO_CHAIN: 3805 return PerformIntrinsicCombine(N, DCI.DAG); 3806 case AArch64ISD::NEON_VDUPLANE: 3807 return CombineVLDDUP(N, DCI); 3808 case AArch64ISD::NEON_LD2DUP: 3809 case AArch64ISD::NEON_LD3DUP: 3810 case AArch64ISD::NEON_LD4DUP: 3811 return CombineBaseUpdate(N, DCI); 3812 case ISD::INTRINSIC_VOID: 3813 case ISD::INTRINSIC_W_CHAIN: 3814 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 3815 case Intrinsic::arm_neon_vld1: 3816 case Intrinsic::arm_neon_vld2: 3817 case Intrinsic::arm_neon_vld3: 3818 case Intrinsic::arm_neon_vld4: 3819 case Intrinsic::arm_neon_vst1: 3820 case Intrinsic::arm_neon_vst2: 3821 case Intrinsic::arm_neon_vst3: 3822 case Intrinsic::arm_neon_vst4: 3823 case Intrinsic::arm_neon_vld2lane: 3824 case Intrinsic::arm_neon_vld3lane: 3825 case Intrinsic::arm_neon_vld4lane: 3826 case Intrinsic::aarch64_neon_vld1x2: 3827 case Intrinsic::aarch64_neon_vld1x3: 3828 case Intrinsic::aarch64_neon_vld1x4: 3829 case Intrinsic::aarch64_neon_vst1x2: 3830 case Intrinsic::aarch64_neon_vst1x3: 3831 case Intrinsic::aarch64_neon_vst1x4: 3832 case Intrinsic::arm_neon_vst2lane: 3833 case Intrinsic::arm_neon_vst3lane: 3834 case Intrinsic::arm_neon_vst4lane: 3835 return CombineBaseUpdate(N, DCI); 3836 default: 3837 break; 3838 } 3839 } 3840 return SDValue(); 3841} 3842 3843bool 3844AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3845 VT = VT.getScalarType(); 3846 3847 if (!VT.isSimple()) 3848 return false; 3849 3850 switch (VT.getSimpleVT().SimpleTy) { 3851 case MVT::f16: 3852 case MVT::f32: 3853 case MVT::f64: 3854 return true; 3855 case MVT::f128: 3856 return false; 3857 default: 3858 break; 3859 } 3860 3861 return false; 3862} 3863 3864// Check whether a Build Vector could be presented as Shuffle Vector. If yes, 3865// try to call LowerVECTOR_SHUFFLE to lower it. 3866bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, 3867 SDValue &Res) const { 3868 SDLoc DL(Op); 3869 EVT VT = Op.getValueType(); 3870 unsigned NumElts = VT.getVectorNumElements(); 3871 unsigned V0NumElts = 0; 3872 int Mask[16]; 3873 SDValue V0, V1; 3874 3875 // Check if all elements are extracted from less than 3 vectors. 3876 for (unsigned i = 0; i < NumElts; ++i) { 3877 SDValue Elt = Op.getOperand(i); 3878 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 3879 return false; 3880 3881 if (V0.getNode() == 0) { 3882 V0 = Elt.getOperand(0); 3883 V0NumElts = V0.getValueType().getVectorNumElements(); 3884 } 3885 if (Elt.getOperand(0) == V0) { 3886 Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue()); 3887 continue; 3888 } else if (V1.getNode() == 0) { 3889 V1 = Elt.getOperand(0); 3890 } 3891 if (Elt.getOperand(0) == V1) { 3892 unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue(); 3893 Mask[i] = (Lane + V0NumElts); 3894 continue; 3895 } else { 3896 return false; 3897 } 3898 } 3899 3900 if (!V1.getNode() && V0NumElts == NumElts * 2) { 3901 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, 3902 DAG.getConstant(NumElts, MVT::i64)); 3903 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, 3904 DAG.getConstant(0, MVT::i64)); 3905 V0NumElts = V0.getValueType().getVectorNumElements(); 3906 } 3907 3908 if (V1.getNode() && NumElts == V0NumElts && 3909 V0NumElts == V1.getValueType().getVectorNumElements()) { 3910 SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask); 3911 Res = LowerVECTOR_SHUFFLE(Shuffle, DAG); 3912 return true; 3913 } else 3914 return false; 3915} 3916 3917// If this is a case we can't handle, return null and let the default 3918// expansion code take care of it. 3919SDValue 3920AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 3921 const AArch64Subtarget *ST) const { 3922 3923 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 3924 SDLoc DL(Op); 3925 EVT VT = Op.getValueType(); 3926 3927 APInt SplatBits, SplatUndef; 3928 unsigned SplatBitSize; 3929 bool HasAnyUndefs; 3930 3931 unsigned UseNeonMov = VT.getSizeInBits() >= 64; 3932 3933 // Note we favor lowering MOVI over MVNI. 3934 // This has implications on the definition of patterns in TableGen to select 3935 // BIC immediate instructions but not ORR immediate instructions. 3936 // If this lowering order is changed, TableGen patterns for BIC immediate and 3937 // ORR immediate instructions have to be updated. 3938 if (UseNeonMov && 3939 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 3940 if (SplatBitSize <= 64) { 3941 // First attempt to use vector immediate-form MOVI 3942 EVT NeonMovVT; 3943 unsigned Imm = 0; 3944 unsigned OpCmode = 0; 3945 3946 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), 3947 SplatBitSize, DAG, VT.is128BitVector(), 3948 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) { 3949 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3950 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3951 3952 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3953 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT, 3954 ImmVal, OpCmodeVal); 3955 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3956 } 3957 } 3958 3959 // Then attempt to use vector immediate-form MVNI 3960 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 3961 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, 3962 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT, 3963 Imm, OpCmode)) { 3964 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); 3965 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); 3966 if (ImmVal.getNode() && OpCmodeVal.getNode()) { 3967 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT, 3968 ImmVal, OpCmodeVal); 3969 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); 3970 } 3971 } 3972 3973 // Attempt to use vector immediate-form FMOV 3974 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) || 3975 (VT == MVT::v2f64 && SplatBitSize == 64)) { 3976 APFloat RealVal( 3977 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble, 3978 SplatBits); 3979 uint32_t ImmVal; 3980 if (A64Imms::isFPImm(RealVal, ImmVal)) { 3981 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 3982 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val); 3983 } 3984 } 3985 } 3986 } 3987 3988 unsigned NumElts = VT.getVectorNumElements(); 3989 bool isOnlyLowElement = true; 3990 bool usesOnlyOneValue = true; 3991 bool hasDominantValue = false; 3992 bool isConstant = true; 3993 3994 // Map of the number of times a particular SDValue appears in the 3995 // element list. 3996 DenseMap<SDValue, unsigned> ValueCounts; 3997 SDValue Value; 3998 for (unsigned i = 0; i < NumElts; ++i) { 3999 SDValue V = Op.getOperand(i); 4000 if (V.getOpcode() == ISD::UNDEF) 4001 continue; 4002 if (i > 0) 4003 isOnlyLowElement = false; 4004 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4005 isConstant = false; 4006 4007 ValueCounts.insert(std::make_pair(V, 0)); 4008 unsigned &Count = ValueCounts[V]; 4009 4010 // Is this value dominant? (takes up more than half of the lanes) 4011 if (++Count > (NumElts / 2)) { 4012 hasDominantValue = true; 4013 Value = V; 4014 } 4015 } 4016 if (ValueCounts.size() != 1) 4017 usesOnlyOneValue = false; 4018 if (!Value.getNode() && ValueCounts.size() > 0) 4019 Value = ValueCounts.begin()->first; 4020 4021 if (ValueCounts.size() == 0) 4022 return DAG.getUNDEF(VT); 4023 4024 // Loads are better lowered with insert_vector_elt. 4025 // Keep going if we are hitting this case. 4026 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 4027 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 4028 4029 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4030 if (hasDominantValue && EltSize <= 64) { 4031 // Use VDUP for non-constant splats. 4032 if (!isConstant) { 4033 SDValue N; 4034 4035 // If we are DUPing a value that comes directly from a vector, we could 4036 // just use DUPLANE. We can only do this if the lane being extracted 4037 // is at a constant index, as the DUP from lane instructions only have 4038 // constant-index forms. 4039 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4040 isa<ConstantSDNode>(Value->getOperand(1))) { 4041 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, 4042 Value->getOperand(0), Value->getOperand(1)); 4043 } else 4044 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 4045 4046 if (!usesOnlyOneValue) { 4047 // The dominant value was splatted as 'N', but we now have to insert 4048 // all differing elements. 4049 for (unsigned I = 0; I < NumElts; ++I) { 4050 if (Op.getOperand(I) == Value) 4051 continue; 4052 SmallVector<SDValue, 3> Ops; 4053 Ops.push_back(N); 4054 Ops.push_back(Op.getOperand(I)); 4055 Ops.push_back(DAG.getConstant(I, MVT::i64)); 4056 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3); 4057 } 4058 } 4059 return N; 4060 } 4061 if (usesOnlyOneValue && isConstant) { 4062 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); 4063 } 4064 } 4065 // If all elements are constants and the case above didn't get hit, fall back 4066 // to the default expansion, which will generate a load from the constant 4067 // pool. 4068 if (isConstant) 4069 return SDValue(); 4070 4071 // Try to lower this in lowering ShuffleVector way. 4072 SDValue Shuf; 4073 if (isKnownShuffleVector(Op, DAG, Shuf)) 4074 return Shuf; 4075 4076 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 4077 // know the default expansion would otherwise fall back on something even 4078 // worse. For a vector with one or two non-undef values, that's 4079 // scalar_to_vector for the elements followed by a shuffle (provided the 4080 // shuffle is valid for the target) and materialization element by element 4081 // on the stack followed by a load for everything else. 4082 if (!isConstant && !usesOnlyOneValue) { 4083 SDValue Vec = DAG.getUNDEF(VT); 4084 for (unsigned i = 0 ; i < NumElts; ++i) { 4085 SDValue V = Op.getOperand(i); 4086 if (V.getOpcode() == ISD::UNDEF) 4087 continue; 4088 SDValue LaneIdx = DAG.getConstant(i, MVT::i64); 4089 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx); 4090 } 4091 return Vec; 4092 } 4093 return SDValue(); 4094} 4095 4096/// isREVMask - Check if a vector shuffle corresponds to a REV 4097/// instruction with the specified blocksize. (The order of the elements 4098/// within each block of the vector is reversed.) 4099static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 4100 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && 4101 "Only possible block sizes for REV are: 16, 32, 64"); 4102 4103 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4104 if (EltSz == 64) 4105 return false; 4106 4107 unsigned NumElts = VT.getVectorNumElements(); 4108 unsigned BlockElts = M[0] + 1; 4109 // If the first shuffle index is UNDEF, be optimistic. 4110 if (M[0] < 0) 4111 BlockElts = BlockSize / EltSz; 4112 4113 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 4114 return false; 4115 4116 for (unsigned i = 0; i < NumElts; ++i) { 4117 if (M[i] < 0) 4118 continue; // ignore UNDEF indices 4119 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) 4120 return false; 4121 } 4122 4123 return true; 4124} 4125 4126// isPermuteMask - Check whether the vector shuffle matches to UZP, ZIP and 4127// TRN instruction. 4128static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) { 4129 unsigned NumElts = VT.getVectorNumElements(); 4130 if (NumElts < 4) 4131 return 0; 4132 4133 bool ismatch = true; 4134 4135 // Check UZP1 4136 for (unsigned i = 0; i < NumElts; ++i) { 4137 if ((unsigned)M[i] != i * 2) { 4138 ismatch = false; 4139 break; 4140 } 4141 } 4142 if (ismatch) 4143 return AArch64ISD::NEON_UZP1; 4144 4145 // Check UZP2 4146 ismatch = true; 4147 for (unsigned i = 0; i < NumElts; ++i) { 4148 if ((unsigned)M[i] != i * 2 + 1) { 4149 ismatch = false; 4150 break; 4151 } 4152 } 4153 if (ismatch) 4154 return AArch64ISD::NEON_UZP2; 4155 4156 // Check ZIP1 4157 ismatch = true; 4158 for (unsigned i = 0; i < NumElts; ++i) { 4159 if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) { 4160 ismatch = false; 4161 break; 4162 } 4163 } 4164 if (ismatch) 4165 return AArch64ISD::NEON_ZIP1; 4166 4167 // Check ZIP2 4168 ismatch = true; 4169 for (unsigned i = 0; i < NumElts; ++i) { 4170 if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) { 4171 ismatch = false; 4172 break; 4173 } 4174 } 4175 if (ismatch) 4176 return AArch64ISD::NEON_ZIP2; 4177 4178 // Check TRN1 4179 ismatch = true; 4180 for (unsigned i = 0; i < NumElts; ++i) { 4181 if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) { 4182 ismatch = false; 4183 break; 4184 } 4185 } 4186 if (ismatch) 4187 return AArch64ISD::NEON_TRN1; 4188 4189 // Check TRN2 4190 ismatch = true; 4191 for (unsigned i = 0; i < NumElts; ++i) { 4192 if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) { 4193 ismatch = false; 4194 break; 4195 } 4196 } 4197 if (ismatch) 4198 return AArch64ISD::NEON_TRN2; 4199 4200 return 0; 4201} 4202 4203SDValue 4204AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 4205 SelectionDAG &DAG) const { 4206 SDValue V1 = Op.getOperand(0); 4207 SDValue V2 = Op.getOperand(1); 4208 SDLoc dl(Op); 4209 EVT VT = Op.getValueType(); 4210 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4211 4212 // Convert shuffles that are directly supported on NEON to target-specific 4213 // DAG nodes, instead of keeping them as shuffles and matching them again 4214 // during code selection. This is more efficient and avoids the possibility 4215 // of inconsistencies between legalization and selection. 4216 ArrayRef<int> ShuffleMask = SVN->getMask(); 4217 4218 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4219 if (EltSize > 64) 4220 return SDValue(); 4221 4222 if (isREVMask(ShuffleMask, VT, 64)) 4223 return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1); 4224 if (isREVMask(ShuffleMask, VT, 32)) 4225 return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1); 4226 if (isREVMask(ShuffleMask, VT, 16)) 4227 return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1); 4228 4229 unsigned ISDNo = isPermuteMask(ShuffleMask, VT); 4230 if (ISDNo) 4231 return DAG.getNode(ISDNo, dl, VT, V1, V2); 4232 4233 // If the element of shuffle mask are all the same constant, we can 4234 // transform it into either NEON_VDUP or NEON_VDUPLANE 4235 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 4236 int Lane = SVN->getSplatIndex(); 4237 // If this is undef splat, generate it via "just" vdup, if possible. 4238 if (Lane == -1) Lane = 0; 4239 4240 // Test if V1 is a SCALAR_TO_VECTOR. 4241 if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4242 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0)); 4243 } 4244 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR. 4245 if (V1.getOpcode() == ISD::BUILD_VECTOR) { 4246 bool IsScalarToVector = true; 4247 for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i) 4248 if (V1.getOperand(i).getOpcode() != ISD::UNDEF && 4249 i != (unsigned)Lane) { 4250 IsScalarToVector = false; 4251 break; 4252 } 4253 if (IsScalarToVector) 4254 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, 4255 V1.getOperand(Lane)); 4256 } 4257 4258 // Test if V1 is a EXTRACT_SUBVECTOR. 4259 if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) { 4260 int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue(); 4261 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0), 4262 DAG.getConstant(Lane + ExtLane, MVT::i64)); 4263 } 4264 // Test if V1 is a CONCAT_VECTORS. 4265 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 4266 V1.getOperand(1).getOpcode() == ISD::UNDEF) { 4267 SDValue Op0 = V1.getOperand(0); 4268 assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() && 4269 "Invalid vector lane access"); 4270 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0, 4271 DAG.getConstant(Lane, MVT::i64)); 4272 } 4273 4274 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1, 4275 DAG.getConstant(Lane, MVT::i64)); 4276 } 4277 4278 int Length = ShuffleMask.size(); 4279 int V1EltNum = V1.getValueType().getVectorNumElements(); 4280 4281 // If the number of v1 elements is the same as the number of shuffle mask 4282 // element and the shuffle masks are sequential values, we can transform 4283 // it into NEON_VEXTRACT. 4284 if (V1EltNum == Length) { 4285 // Check if the shuffle mask is sequential. 4286 bool IsSequential = true; 4287 int CurMask = ShuffleMask[0]; 4288 for (int I = 0; I < Length; ++I) { 4289 if (ShuffleMask[I] != CurMask) { 4290 IsSequential = false; 4291 break; 4292 } 4293 CurMask++; 4294 } 4295 if (IsSequential) { 4296 assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect"); 4297 unsigned VecSize = EltSize * V1EltNum; 4298 unsigned Index = (EltSize/8) * ShuffleMask[0]; 4299 if (VecSize == 64 || VecSize == 128) 4300 return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2, 4301 DAG.getConstant(Index, MVT::i64)); 4302 } 4303 } 4304 4305 // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert 4306 // by element from V2 to V1 . 4307 // If shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a 4308 // better choice to be inserted than V1 as less insert needed, so we count 4309 // element to be inserted for both V1 and V2, and select less one as insert 4310 // target. 4311 4312 // Collect elements need to be inserted and their index. 4313 SmallVector<int, 8> NV1Elt; 4314 SmallVector<int, 8> N1Index; 4315 SmallVector<int, 8> NV2Elt; 4316 SmallVector<int, 8> N2Index; 4317 for (int I = 0; I != Length; ++I) { 4318 if (ShuffleMask[I] != I) { 4319 NV1Elt.push_back(ShuffleMask[I]); 4320 N1Index.push_back(I); 4321 } 4322 } 4323 for (int I = 0; I != Length; ++I) { 4324 if (ShuffleMask[I] != (I + V1EltNum)) { 4325 NV2Elt.push_back(ShuffleMask[I]); 4326 N2Index.push_back(I); 4327 } 4328 } 4329 4330 // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2 4331 // will be inserted. 4332 SDValue InsV = V1; 4333 SmallVector<int, 8> InsMasks = NV1Elt; 4334 SmallVector<int, 8> InsIndex = N1Index; 4335 if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) { 4336 if (NV1Elt.size() > NV2Elt.size()) { 4337 InsV = V2; 4338 InsMasks = NV2Elt; 4339 InsIndex = N2Index; 4340 } 4341 } else { 4342 InsV = DAG.getNode(ISD::UNDEF, dl, VT); 4343 } 4344 4345 for (int I = 0, E = InsMasks.size(); I != E; ++I) { 4346 SDValue ExtV = V1; 4347 int Mask = InsMasks[I]; 4348 if (Mask >= V1EltNum) { 4349 ExtV = V2; 4350 Mask -= V1EltNum; 4351 } 4352 // Any value type smaller than i32 is illegal in AArch64, and this lower 4353 // function is called after legalize pass, so we need to legalize 4354 // the result here. 4355 EVT EltVT; 4356 if (VT.getVectorElementType().isFloatingPoint()) 4357 EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32; 4358 else 4359 EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32; 4360 4361 if (Mask >= 0) { 4362 ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV, 4363 DAG.getConstant(Mask, MVT::i64)); 4364 InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV, 4365 DAG.getConstant(InsIndex[I], MVT::i64)); 4366 } 4367 } 4368 return InsV; 4369} 4370 4371AArch64TargetLowering::ConstraintType 4372AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { 4373 if (Constraint.size() == 1) { 4374 switch (Constraint[0]) { 4375 default: break; 4376 case 'w': // An FP/SIMD vector register 4377 return C_RegisterClass; 4378 case 'I': // Constant that can be used with an ADD instruction 4379 case 'J': // Constant that can be used with a SUB instruction 4380 case 'K': // Constant that can be used with a 32-bit logical instruction 4381 case 'L': // Constant that can be used with a 64-bit logical instruction 4382 case 'M': // Constant that can be used as a 32-bit MOV immediate 4383 case 'N': // Constant that can be used as a 64-bit MOV immediate 4384 case 'Y': // Floating point constant zero 4385 case 'Z': // Integer constant zero 4386 return C_Other; 4387 case 'Q': // A memory reference with base register and no offset 4388 return C_Memory; 4389 case 'S': // A symbolic address 4390 return C_Other; 4391 } 4392 } 4393 4394 // FIXME: Ump, Utf, Usa, Ush 4395 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, 4396 // whatever they may be 4397 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be 4398 // Usa: An absolute symbolic address 4399 // Ush: The high part (bits 32:12) of a pc-relative symbolic address 4400 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa" 4401 && Constraint != "Ush" && "Unimplemented constraints"); 4402 4403 return TargetLowering::getConstraintType(Constraint); 4404} 4405 4406TargetLowering::ConstraintWeight 4407AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info, 4408 const char *Constraint) const { 4409 4410 llvm_unreachable("Constraint weight unimplemented"); 4411} 4412 4413void 4414AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4415 std::string &Constraint, 4416 std::vector<SDValue> &Ops, 4417 SelectionDAG &DAG) const { 4418 SDValue Result(0, 0); 4419 4420 // Only length 1 constraints are C_Other. 4421 if (Constraint.size() != 1) return; 4422 4423 // Only C_Other constraints get lowered like this. That means constants for us 4424 // so return early if there's no hope the constraint can be lowered. 4425 4426 switch(Constraint[0]) { 4427 default: break; 4428 case 'I': case 'J': case 'K': case 'L': 4429 case 'M': case 'N': case 'Z': { 4430 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 4431 if (!C) 4432 return; 4433 4434 uint64_t CVal = C->getZExtValue(); 4435 uint32_t Bits; 4436 4437 switch (Constraint[0]) { 4438 default: 4439 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J' 4440 // is a peculiarly useless SUB constraint. 4441 llvm_unreachable("Unimplemented C_Other constraint"); 4442 case 'I': 4443 if (CVal <= 0xfff) 4444 break; 4445 return; 4446 case 'K': 4447 if (A64Imms::isLogicalImm(32, CVal, Bits)) 4448 break; 4449 return; 4450 case 'L': 4451 if (A64Imms::isLogicalImm(64, CVal, Bits)) 4452 break; 4453 return; 4454 case 'Z': 4455 if (CVal == 0) 4456 break; 4457 return; 4458 } 4459 4460 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 4461 break; 4462 } 4463 case 'S': { 4464 // An absolute symbolic address or label reference. 4465 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 4466 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 4467 GA->getValueType(0)); 4468 } else if (const BlockAddressSDNode *BA 4469 = dyn_cast<BlockAddressSDNode>(Op)) { 4470 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(), 4471 BA->getValueType(0)); 4472 } else if (const ExternalSymbolSDNode *ES 4473 = dyn_cast<ExternalSymbolSDNode>(Op)) { 4474 Result = DAG.getTargetExternalSymbol(ES->getSymbol(), 4475 ES->getValueType(0)); 4476 } else 4477 return; 4478 break; 4479 } 4480 case 'Y': 4481 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { 4482 if (CFP->isExactlyValue(0.0)) { 4483 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0)); 4484 break; 4485 } 4486 } 4487 return; 4488 } 4489 4490 if (Result.getNode()) { 4491 Ops.push_back(Result); 4492 return; 4493 } 4494 4495 // It's an unknown constraint for us. Let generic code have a go. 4496 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 4497} 4498 4499std::pair<unsigned, const TargetRegisterClass*> 4500AArch64TargetLowering::getRegForInlineAsmConstraint( 4501 const std::string &Constraint, 4502 MVT VT) const { 4503 if (Constraint.size() == 1) { 4504 switch (Constraint[0]) { 4505 case 'r': 4506 if (VT.getSizeInBits() <= 32) 4507 return std::make_pair(0U, &AArch64::GPR32RegClass); 4508 else if (VT == MVT::i64) 4509 return std::make_pair(0U, &AArch64::GPR64RegClass); 4510 break; 4511 case 'w': 4512 if (VT == MVT::f16) 4513 return std::make_pair(0U, &AArch64::FPR16RegClass); 4514 else if (VT == MVT::f32) 4515 return std::make_pair(0U, &AArch64::FPR32RegClass); 4516 else if (VT.getSizeInBits() == 64) 4517 return std::make_pair(0U, &AArch64::FPR64RegClass); 4518 else if (VT.getSizeInBits() == 128) 4519 return std::make_pair(0U, &AArch64::FPR128RegClass); 4520 break; 4521 } 4522 } 4523 4524 // Use the default implementation in TargetLowering to convert the register 4525 // constraint into a member of a register class. 4526 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 4527} 4528 4529/// Represent NEON load and store intrinsics as MemIntrinsicNodes. 4530/// The associated MachineMemOperands record the alignment specified 4531/// in the intrinsic calls. 4532bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 4533 const CallInst &I, 4534 unsigned Intrinsic) const { 4535 switch (Intrinsic) { 4536 case Intrinsic::arm_neon_vld1: 4537 case Intrinsic::arm_neon_vld2: 4538 case Intrinsic::arm_neon_vld3: 4539 case Intrinsic::arm_neon_vld4: 4540 case Intrinsic::aarch64_neon_vld1x2: 4541 case Intrinsic::aarch64_neon_vld1x3: 4542 case Intrinsic::aarch64_neon_vld1x4: 4543 case Intrinsic::arm_neon_vld2lane: 4544 case Intrinsic::arm_neon_vld3lane: 4545 case Intrinsic::arm_neon_vld4lane: { 4546 Info.opc = ISD::INTRINSIC_W_CHAIN; 4547 // Conservatively set memVT to the entire set of vectors loaded. 4548 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 4549 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4550 Info.ptrVal = I.getArgOperand(0); 4551 Info.offset = 0; 4552 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4553 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4554 Info.vol = false; // volatile loads with NEON intrinsics not supported 4555 Info.readMem = true; 4556 Info.writeMem = false; 4557 return true; 4558 } 4559 case Intrinsic::arm_neon_vst1: 4560 case Intrinsic::arm_neon_vst2: 4561 case Intrinsic::arm_neon_vst3: 4562 case Intrinsic::arm_neon_vst4: 4563 case Intrinsic::aarch64_neon_vst1x2: 4564 case Intrinsic::aarch64_neon_vst1x3: 4565 case Intrinsic::aarch64_neon_vst1x4: 4566 case Intrinsic::arm_neon_vst2lane: 4567 case Intrinsic::arm_neon_vst3lane: 4568 case Intrinsic::arm_neon_vst4lane: { 4569 Info.opc = ISD::INTRINSIC_VOID; 4570 // Conservatively set memVT to the entire set of vectors stored. 4571 unsigned NumElts = 0; 4572 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 4573 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 4574 if (!ArgTy->isVectorTy()) 4575 break; 4576 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 4577 } 4578 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 4579 Info.ptrVal = I.getArgOperand(0); 4580 Info.offset = 0; 4581 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 4582 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 4583 Info.vol = false; // volatile stores with NEON intrinsics not supported 4584 Info.readMem = false; 4585 Info.writeMem = true; 4586 return true; 4587 } 4588 default: 4589 break; 4590 } 4591 4592 return false; 4593} 4594