int_arm64.cc revision 69dfe51b684dd9d510dbcb63295fe180f998efde
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3-way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;

  rl_src = LoadValue(rl_src, src_reg_class);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);

  // The kMirOpSelect has two variants, one for constants and one for moves.
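  // Roughly (a sketch only; the constant variant has several special cases below):
  //   constants: cmp wS, #0; csinc wD, wzr, wzr, cond   // wD = cond ? 0 : 1
  //   moves:     cmp wS, #0; csel  wD, wT, wF, cond     // wD = cond ? wT : wF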
  bool is_wide = rl_dest.ref || rl_dest.wide;

  if (mir->ssa_rep->num_uses == 1) {
    uint32_t true_val = mir->dalvikInsn.vB;
    uint32_t false_val = mir->dalvikInsn.vC;

    int opcode;             // The opcode.
    int left_op, right_op;  // The operands.
    bool rl_result_evaled = false;

    // Check some simple cases.
    // TODO: Improve this.
    int zero_reg = (is_wide ? rs_xzr : rs_wzr).GetReg();

    if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
      // CSInc cheap based on wzr.
      if (true_val == 1) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }

      left_op = right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else if ((true_val == 0 && false_val == 0xFFFFFFFF) ||
               (true_val == 0xFFFFFFFF && false_val == 0)) {
      // CSneg cheap based on wzr.
      if (true_val == 0xFFFFFFFF) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }

      left_op = right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
    } else if (true_val == 0 || false_val == 0) {
      // Csel half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (false_val == 0) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 0 ? false_val : true_val);
      left_op = zero_reg;
      right_op = rl_result.reg.GetReg();
      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    } else if (true_val == 1 || false_val == 1) {
      // CSInc half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (true_val == 1) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 1 ? false_val : true_val);
      left_op = rl_result.reg.GetReg();
      right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else if (true_val == 0xFFFFFFFF || false_val == 0xFFFFFFFF) {
      // CSneg half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (true_val == 0xFFFFFFFF) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
      left_op = rl_result.reg.GetReg();
      right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
    } else if ((true_val + 1 == false_val) || (false_val + 1 == true_val)) {
      // Load a constant and use CSinc. Use rl_result.
      if (false_val + 1 == true_val) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
        true_val = false_val;
      }

      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      LoadConstantNoClobber(rl_result.reg, true_val);
      left_op = right_op = rl_result.reg.GetReg();
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else {
      // Csel. The rest. Use rl_result and a temp.
      // TODO: To minimize the constants being loaded, check whether one can be inexpensively
      //       loaded as n - 1 or ~n.
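      //       For example, a pair like {0x7FFFFFFF, 0x80000000} could load just one
      //       constant and derive the other via csinv, since ~0x7FFFFFFF == 0x80000000.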
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      LoadConstantNoClobber(rl_result.reg, true_val);
      RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
      if (rl_dest.wide) {
        if (t_reg2.Is32Bit()) {
          t_reg2 = As64BitReg(t_reg2);
        }
      }
      LoadConstantNoClobber(t_reg2, false_val);

      // Use csel.
      left_op = rl_result.reg.GetReg();
      right_op = t_reg2.GetReg();
      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    }

    if (!rl_result_evaled) {
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
    }

    NewLIR4(opcode, rl_result.reg.GetReg(), left_op, right_op, code);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    rl_result = EvalLoc(rl_dest, result_reg_class, true);

    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), code);
  }
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
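      // (An unsigned r <= 0 can only hold when r == 0, which is why cbz is exact here.)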
      ArmOpcode opcode = kA64Cbz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // It is possible that the temp register is 64-bit (ArgReg or RefReg).
  // Always compare a 32-bit value, no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a two-argument
      // instruction. This currently works because the other arguments are set to 0
      // by default. We should rather introduce an alias kA64Mov2RR.

      // Core/core copy. Do an x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
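    // These lower to the gpr<->fpr fmov forms, e.g. "fmov d0, x1" or "fmov w0, s1",
    // chosen by direction and width below.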
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors.
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4).
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
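      // The final "sub qd, q, x, asr #31" in each pattern adds 1 for negative
      // dividends (x >> 31 is -1 there), rounding the magic-multiply quotient
      // toward zero as Dex semantics require.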
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;
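
  // Non-powers of two are dispatched to the magic-number helpers above; powers
  // of two are strength-reduced to the shift/add sequences below.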

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
                                       bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63 (arithmetic); result = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];    // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                         int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5].
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7].
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset.
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  ArmOpcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZeroCheckWide for Arm64";
}

// Test suspend flag, return target of taken suspend branch.
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition.
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off by using the last LIR as the barrier. If it is not enough, then we will
  // generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds.
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient. It is documented
        // to provide LoadLoad | StoreStore ordering. But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example. But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}
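
// neg-long above lowers to a single "neg xd, xn" (an alias of "sub xd, xzr, xn");
// not-long below lowers to a single "mvn xd, xn" (an alias of "orn xd, xzr, xn").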
void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load.
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement.
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store.
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale,
                               bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
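    // (In the worst case those are rl_array.reg, rl_index.reg, reg_ptr and reg_len.)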
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src,
                                     RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Commutativity: for the remaining ops, ensure the constant operand ends up in rl_src2.
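    // (e.g. "const + x" is treated as "x + const" so the constant can be encoded
    // as an immediate operand.)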
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs (with a possible trailing single register).
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 * int reg1 = -1, reg2 = -1;
 * while (reg_mask) {
 *   reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *   if (UNLIKELY(reg2 < 0)) {
 *     // Single register in reg1.
 *   } else {
 *     // Pair in reg1, reg2.
 *   }
 * }
 * @endcode
 */
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = (size == k64) ?
      LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

}  // namespace art