int_arm64.cc revision 90969af6deb19b1dbe356d62fe68d8f5698d3d8f
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit three-way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
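
/*
 * Worked example (illustrative): for xA = 5, xB = 7 the cmp sets "lt", so
 * csinc takes wzr + 1 = 1 (eq is false) and csneg then negates it (ge is
 * false), leaving wC = -1. Equal inputs give wC = 0 and xA > xB gives +1,
 * matching the -1/0/1 contract of the cmp-long bytecode.
 */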
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;
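
// A sketch of how GenSelect below maps (true_val, false_val) pairs onto the
// A64 conditional-select family (illustrative, not exhaustive):
//   (x, x + 1) -> csinc rd, rn, rn, cond   // rd = cond ? x : x + 1
//   (x, -x)    -> csneg rd, rn, rn, cond   // rd = cond ? x : -x
//   (x, ~x)    -> csinv rd, rn, rn, cond   // rd = cond ? x : ~x
//   (0, 1)     -> csinc rd, zr, zr, cond   // rd = cond ? 0 : 1, no constant load
// The normalization at the top swaps the pair (negating the condition) so that
// these cheap encodings fire as often as possible.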
void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;                                      // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
          code);
}

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    int dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less-or-equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      ArmOpcode opcode = kA64Cbz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target) {
  // The temp register may be 64-bit (an ArgReg or a RefReg).
  // Always compare a 32-bit value, no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors.
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4).
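// Worked example (illustrative): for a 32-bit divide by 3, smaddl computes the
// 64-bit product x * 0x55555556; its high 32 bits equal x / 3 for x >= 0, and
// subtracting (x >> 31) corrects the result for negative x. The Divide5 and
// Divide7 patterns differ only in the extra shift and add around the multiply.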
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();
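
  // magic64_base/magic64_eor hold pre-encoded logical immediates that rebuild
  // the 64-bit magic constant in at most two instructions instead of a longer
  // LoadConstantWide sequence. E.g. for lit == 3 the base decodes to
  // 0x5555555555555555 and the "+ 1" path below produces the magic value
  // 0x5555555555555556; the debug check verifies each table entry this way.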
  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}
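
// For powers of two, HandleEasyDivRem64 below avoids the multiply entirely.
// A worked example (illustrative) for a signed 32-bit divide by 4 (k = 2):
//   asr  t, x, #31          // t = sign mask: 0, or -1 for negative x
//   add  t, x, t, lsr #30   // adds 2^k - 1 == 3 only when x is negative
//   asr  result, t, #2      // arithmetic shift now rounds toward zero
// The lit == 2 special case folds the first two steps into a single add.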
// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}
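
// Branchless abs for the Math.abs(long) intrinsic below, with an illustrative
// trace: for x = -5, y = x >> 63 = -1 and (x + y) ^ y = (-6) ^ -1 = 5; for
// x >= 0, y = 0 and the add and xor are no-ops.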
bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63 (arithmetic), abs = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];    // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                         int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5].
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7].
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset.
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  ArmOpcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;
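
  // On loop exit the flags come either from the failed compare against the
  // expected value (ne) or from the status check after a successful stlxr
  // (eq), so the csinc below materializes the boolean result directly:
  // rd = ne ? wzr : wzr + 1, i.e. 0 on failure and 1 on success.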
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZeroCheckWide for Arm64";
}

// Test suspend flag, return target of taken suspend branch.
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition.
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate it explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will
  // generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds.
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient. It is documented
        // to provide LoadLoad | StoreStore ordering. But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example. But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  // SBFM xd, xn, #0, #31 is the sxtw alias: sign-extend the low word to 64 bits.
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load.
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement.
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store.
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale,
                               bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps (4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Commutativity: for the remaining ops we may swap operands, so normalize the
    // constant into rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs of registers (or single registers).
 *
 * Given a list of registers in @p reg_mask, split the list in pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
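/*
 * Example trace (illustrative): for reg_mask = 0b1011 (registers 0, 1 and 3),
 * the first call returns the pair (reg1, reg2) = (1, 0) with mask 0b10
 * remaining; the second call returns the single register reg1 = 3 (reg2 = -1)
 * and a zero mask, which ends the loop.
 */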
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = (size == k64) ?
      LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

}  // namespace art