int_arm64.cc revision 33ae5583bdd69847a7316ab38a8fa8ccd63093ef
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit three-way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}
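
// Illustrative trace of the sequence above: with xA = 5, xB = 3 the cmp makes
// "eq" false and "ge" true, so csinc produces wC = 1 and csneg keeps it at +1.
// With the operands swapped, "ge" is false and csneg negates, giving -1; equal
// inputs give 0. This matches the Dalvik cmp-long result set {-1, 0, 1}.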

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
  rl_src = LoadValue(rl_src, src_reg_class);
  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);

  RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
  RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
  rl_true = LoadValue(rl_true, result_reg_class);
  rl_false = LoadValue(rl_false, result_reg_class);
  rl_result = EvalLoc(rl_dest, result_reg_class, true);
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_true.reg.GetReg(),
          rl_false.reg.GetReg(), code);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  if (rl_src2.is_const) {
    rl_src2 = UpdateLocWide(rl_src2);
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    // Only handle Imm if src2 is not already in a register.
    } else if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }
  branch->target = target;
  return branch;
}
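
// Illustrative note on the zero-test fast path above: a compare against zero
// with an eq/ne condition folds into a single compare-and-branch instruction,
// e.g. "cbz w0, target" instead of "cmp w0, #0; b.eq target", saving one
// instruction and leaving the condition flags untouched.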

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        DCHECK(r_src.IsSingle());
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        DCHECK(r_dest.Is32Bit());
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};
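
// Illustrative sketch of the reciprocal-multiply trick behind this table
// (Hacker's Delight, 10-4): for a divisor d the magic constant is roughly
// 2^(32 + shift) / d. For d == 3, magic == ceil(2^32 / 3) == 0x55555556, so
// the high 32 bits of n * 0x55555556 equal n / 3 for n >= 0 and fall one
// short of the truncated quotient for n < 0; the Divide3 pattern below
// subtracts (n >> 31), i.e. adds 1 for negative n, to produce the
// Dalvik-correct truncated result.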

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4).
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  // TODO(Arm64): fix this for Arm64. Note: may be worth revisiting the magic table.
  //   It should be possible to subtract one from all its entries, and to use smaddl
  //   to counteract this. The advantage is that integers would then be easier to
  //   encode as logical immediates (0x55555555 rather than 0x55555556).
  UNIMPLEMENTED(FATAL);

  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, r_lo.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
                       EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi,
                       EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
                                       bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}
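
// Note on the remainder path above: AArch64 has no hardware remainder
// instruction, so rem is computed as sdiv followed by msub. MSUB
// Rd, Rn, Rm, Ra computes Rd = Ra - Rn * Rm, which yields
// rem = n - (n / d) * d in a single instruction after the divide.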

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63; result = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
          rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]?
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);  // kRefReg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);  // kRefReg

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                         int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}
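
// The compare-and-swap below is built from AArch64 exclusive monitors:
// ldaxr (load-acquire exclusive) and stlxr (store-release exclusive), which
// together give the CAS its acquire/release semantics. Illustrative shape of
// the emitted code:
//   loop: ldaxr xTmp, [xPtr]
//         cmp   xTmp, xExpected
//         b.ne  exit                    // Value changed: fail with 0.
//         stlxr wStatus, xNew, [xPtr]   // wStatus == 0 on success.
//         cmp   wStatus, #0
//         b.ne  loop                    // Lost the exclusive: retry.
//   exit: csinc wResult, wzr, wzr, ne   // 1 on success, 0 on failure.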

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  ArmOpcode wide = is_long ? WIDE(0) : UNWIDE(0);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  // TODO: do we really need the NarrowRegLoc above?
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5].
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7].
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset.
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kRefReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = (tmp == 0);

  RegStorage r_tmp;
  if (is_long) {
    r_tmp = AllocTempWide();
  } else if (is_object) {
    r_tmp = AllocTempRef();
  } else {
    r_tmp = AllocTemp();
  }

  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);

  NewLIR3(kA64Stlxr3wrX | wide, As32BitReg(r_tmp).GetReg(), rl_new_value.reg.GetReg(),
          r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, As32BitReg(r_tmp).GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LIR* exit = NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
  early_exit->target = exit;

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}
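
// Illustrative trace of the two-bit multiplier above: for lit == 10 (0b1010,
// so first_bit == 1 and second_bit == 3) it emits
//   add w0, w1, w1, lsl #2   // w0 = w1 * 5
//   lsl w0, w0, #1           // w0 = w1 * 10
// replacing a multiply with a shifted add and a shift.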

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch.
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  // FIXME: Define rA64_SUSPEND as w19, when we do not need two copies of reserved register.
  // Note: The opcode is not set as wide, so we actually use the 32-bit version of the register.
  NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition.
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off by using the last LIR as the barrier; if it is not a suitable one, generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds.
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}
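
// Note on the SBFM above: SBFM Xd, Xn, #0, #31 sign-extends the low 32 bits
// into all 64, i.e. it is the encoding underlying the SXTW alias, which is
// exactly the int-to-long conversion required here.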

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load.
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement.
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}
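
// Addressing note for the two paths above (register names illustrative): the
// wide/fp/constant path materializes the element address with a shifted add,
// e.g.
//   add x16, xArray, xIndex, lsl #scale
// and then loads with an immediate displacement, while the narrow-int path
// instead offsets the base by data_offset and uses the scaled register form,
// e.g. ldr w0, [x16, xIndex, lsl #scale].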

/*
 * Generate array store.
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale,
                               bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps (4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }

    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src,
                                     RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}
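
// Masking note for the constant shift above (illustrative): Dalvik defines
// long shifts by the low six bits of the shift count, so a constant such as
// 65 reduces to 1 and a multiple of 64 reduces to 0, which is why the
// shift_amount == 0 case simply copies the source to the destination.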

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Commutativity: put the constant operand in rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs and single registers.
 *
 * Given a list of registers in @p reg_mask, split the list in pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}
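
// Illustrative trace of GenPairWise: starting from reg1 = -1 with
// reg_mask == 0b1011 (registers 0, 1 and 3), the first call returns the pair
// (reg2 == 0, reg1 == 1) and mask 0b10; the second call returns the single
// register reg1 == 3. Note that *reg1 carries the running register index
// between calls, which is why callers must initialize it to -1 as in the
// usage example above.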

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

}  // namespace art