gen_common.cc revision 807140048f82a2b87ee5bcf337f23b6a3d1d5269
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_reference.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true);
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}
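// Note: a single unsigned comparison covers both bounds-check failure modes. With kCondUge the
// index is treated as unsigned, so a negative index wraps to a large value and also branches to
// the slow path. For example:
//   index = -1 (0xFFFFFFFF as unsigned), length = 10  ->  0xFFFFFFFF >=u 10  ->  throw
//   index = 3,                           length = 10  ->  3 <u 10            ->  in bounds
// The constant-index variant below mirrors this by testing length <=u index (kCondLs).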
void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide);
      RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide);

      m2l_->OpRegCopy(arg1_32, length_);
      m2l_->LoadConstant(arg0_32, index_);
      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true);
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);
      m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true);
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  // If null check has not been eliminated, reset redundant store tracking.
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0) {
    ResetDefTracking();
  }
  return nullptr;
}

/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return nullptr;
  }
  return GenNullCheck(m_reg);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Insert after last instruction.
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPCAfter(after);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}
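// When implicit null checks are enabled, most null checks emit no compare-and-branch at all: the
// subsequent memory access simply faults if the base register is null, and the runtime's fault
// handler converts the signal into a NullPointerException. The MarkSafepoint* calls above record
// the PC of the potentially faulting instruction so the runtime can map it back to a dex PC when
// constructing the exception; the exact handler mechanics live in the runtime, not here.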
void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0.  This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}
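// GenCompareAndBranch below first normalizes its operands so that a constant, if any, ends up in
// src2. Swapping the operands requires mirroring the condition rather than negating it, which is
// what FlipComparisonOrder does. For example, IF_LT with a constant first operand:
//   if (7 < x)  ==>  if (x > 7)   (kCondLt becomes kCondGt, not kCondGe)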
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  DCHECK(!rl_src1.fp);
  DCHECK(!rl_src2.fp);
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant.
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(constant_value, opcode)) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }

    // It's also commonly more efficient to have a test against zero with Eq/Ne. This is not worse
    // for x86, and allows a cbz/cbnz for Arm and Mips. At the same time, it works around a register
    // mismatch for 64b systems, where a reference is compared against null, as dex bytecode uses
    // the 32b literal 0 for null.
    if (constant_value == 0 && (cond == kCondEq || cond == kCondNe)) {
      // Use the OpCmpImmBranch and ignore the value in the register.
      OpCmpImmBranch(cond, rl_src1.reg, 0, taken);
      return;
    }
  }

  rl_src2 = LoadValue(rl_src2);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  DCHECK(!rl_src.fp);
  rl_src = LoadValue(rl_src);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}
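// The arithmetic shift right by 31 replicates the sign bit of the low word across the entire
// high word, which is exactly sign extension to 64 bits:
//   low = 0x00000005 (5)   ->  high = 0x00000000  ->  long 0x0000000000000005
//   low = 0xFFFFFFFE (-2)  ->  high = 0xFFFFFFFF  ->  long 0xFFFFFFFFFFFFFFFE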
void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        CallRuntimeHelperRegMethodRegLocation(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
                                              rl_src, true);
      } else {
        // Use the direct pointer.
        CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayResolved, direct_type_ptr, rl_src,
                                              true);
      }
    } else {
      // The slow path.
      CallRuntimeHelperImmMethodRegLocation(kQuickAllocArray, type_idx, rl_src, true);
    }
  } else {
    CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true);
  }
  StoreValue(rl_dest, GetReturn(kRefReg));
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * The verifier guarantees we're dealing with an array class. The current
 * code throws a runtime exception ("bad Filled array req") for 'D' and 'J',
 * and an internal "unimplemented" error for anything other than 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  QuickEntrypointEnum target;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    target = kQuickCheckAndAllocArray;
  } else {
    target = kQuickCheckAndAllocArrayWithAccessCheck;
  }
  CallRuntimeHelperImmMethodImm(target, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2, kNotWide));
  FreeTemp(TargetReg(kArg1, kNotWide));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage.
   */
  RegStorage ref_reg = TargetReg(kRet0, kRef);
  LockTemp(ref_reg);

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    // This is addressing the stack, which may be out of the 4G area.
    RegStorage r_src = AllocTempRef();
    RegStorage r_dst = AllocTempRef();
    RegStorage r_idx = AllocTempRef();  // Not really a reference, but match src/dst.
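    // The code below emits, in effect, the following copy loop over the argument range, walking
    // backwards so it can end on a single decrement-and-branch (OpDecAndBranch, kCondGe):
    //
    //   r_src = sp + SRegOffset(first_arg);
    //   r_dst = array + DataOffset;
    //   for (r_idx = elems - 1; r_idx >= 0; r_idx--) {
    //     r_val = r_src[r_idx];    // scale 2, i.e. 4-byte elements
    //     r_dst[r_idx] = r_val;
    //   }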
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
      case kArm64:
        r_val = TargetReg(kLr, kNotWide);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(ref_reg);
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, ref_reg,
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    {
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
      // NOTE: No dalvik register annotation, local optimizations will be stopped
      // by the loop boundaries.
    }
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, ref_reg, r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(ref_reg,
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(kRefReg));
  }
}
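// The slow path below backs the inline checks that GenSput/GenSget emit for a static field
// declared in a different class: one branch fires when the storage class is still unresolved
// (r_base == null), another when it is resolved but not yet initialized
// (status < mirror::Class::kStatusInitialized). Both land here, which calls the runtime to
// resolve and initialize the class and leaves the storage base in r_base.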
//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut()) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1, kRef);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0, kRef);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
        RegStorage r_tmp = TargetReg(kArg2, kNotWide);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, nullptr, nullptr);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and store of value don't re-order.
        // TODO: Presumably the actual value store is control-dependent on the status load,
        // and will thus not be reordered in any case, since stores are never speculated.
        // Does later code "know" that the class is now initialized?  If so, we still
        // need the barrier to guard later static loads.
        GenMemBarrier(kLoadAny);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (is_object) {
      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    QuickEntrypointEnum target =
        is_long_or_double ? kQuickSet64Static
            : (is_object ? kQuickSetObjStatic : kQuickSet32Static);
    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
  }
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet()) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTempRef();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
                  kNotVolatile);
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1, kRef);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0, kRef);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
                  kNotVolatile);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
        RegStorage r_tmp = TargetReg(kArg2, kNotWide);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, nullptr, nullptr);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
        // Ensure load of status and load of value don't re-order.
        GenMemBarrier(kLoadAny);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);

    int field_offset = field_info.FieldOffset().Int32Value();
    if (is_object) {
      LoadRefDisp(r_base, field_offset, rl_result.reg,
                  field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size,
                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    FreeTemp(r_base);

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    QuickEntrypointEnum target =
        is_long_or_double ? kQuickGet64Static
            : (is_object ? kQuickGetObjStatic : kQuickGet32Static);
    CallRuntimeHelperImm(target, field_info.FieldIndex(), true);

    // FIXME: the pGetXXStatic entrypoints always return an int or int64 regardless of rl_dest.fp.
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(kCoreReg);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  // We should check slow_paths_.Size() every time, because a new slow path
  // may be created during slowpath->Compile().
  for (size_t i = 0; i < slow_paths_.Size(); ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}
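// Indexing with a freshly read Size() on each iteration is deliberate: Compile() may itself call
// AddSlowPath(), growing the list while it is being walked. A cached end value or an iterator
// would miss (or be invalidated by) entries appended mid-loop.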
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastGet()) {
    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
    // A load of the class will lead to an iget with offset 0.
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kRefReg);
    GenNullCheck(rl_obj.reg, opt_flags);
    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* load_lir;
    if (is_object) {
      load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg,
                             field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
                              field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    QuickEntrypointEnum target =
        is_long_or_double ? kQuickGet64Instance
            : (is_object ? kQuickGetObjInstance : kQuickGet32Instance);
    // Second argument of pGetXXInstance is always a reference.
    DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);

    // FIXME: the pGetXXInstance entrypoints always return an int or int64 regardless of rl_dest.fp.
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(kCoreReg);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
  if (!SLOW_FIELD_PATH && field_info.FastPut()) {
    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
    // Dex code never writes to the class field.
    DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
              sizeof(mirror::HeapReference<mirror::Class>));
    rl_obj = LoadValue(rl_obj, kRefReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    GenNullCheck(rl_obj.reg, opt_flags);
    int field_offset = field_info.FieldOffset().Int32Value();
    LIR* store;
    if (is_object) {
      store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg,
                           field_info.IsVolatile() ? kVolatile : kNotVolatile);
    } else {
      store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
                            field_info.IsVolatile() ? kVolatile : kNotVolatile);
    }
    MarkPossibleNullPointerExceptionAfter(opt_flags, store);
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, rl_obj.reg);
    }
  } else {
    QuickEntrypointEnum target =
        is_long_or_double ? kQuickSet64Instance
            : (is_object ? kQuickSetObjInstance : kQuickSet32Instance);
    CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
                                               true);
  }
}
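// MarkGCCard above is the garbage collector's write barrier: after a reference is stored into an
// object, the card covering that object is dirtied so a concurrent or generational collector
// knows to re-scan it. The barrier is skipped when the stored value is a compile-time null, since
// a null store cannot create a cross-object reference the collector would need to trace.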
void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  QuickEntrypointEnum target = needs_range_check
      ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck
                          : kQuickAputObjectWithBoundCheck)
      : kQuickAputObject;
  CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  CheckRegLocation(rl_method);
  RegStorage res_reg = AllocTempRef();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(kQuickInitializeTypeAndVerifyAccess, type_idx, rl_method.reg, true);
    RegLocation rl_result = GetReturn(kRefReg);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load the type from the dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx) ||
        SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, nullptr);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, rl_method_.reg, true);
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef));
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}
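// The fast path above is a two-load pointer chase through the dex cache, roughly:
//
//   res_reg = method->dex_cache_resolved_types_;   // ObjectArray<Class>*
//   result  = res_reg[type_idx];                   // load at OffsetOfElement(type_idx)
//
// A null result means the type has not been resolved yet; when the compiler cannot prove the
// entry is already present, the null test and kQuickInitializeType slow path above handle that
// case at runtime.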
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  const int32_t offset_of_string =
      mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // Slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2, kRef);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetReg(kArg0, kRef), kNotVolatile);

    // Might call out to helper, which will return resolved string in kRet0
    LoadRefDisp(TargetReg(kArg0, kRef), offset_of_string, TargetReg(kRet0, kRef), kNotVolatile);
    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0, kRef), 0, nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    {
      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
            r_method_(r_method), string_idx_(string_idx) {
        }

        void Compile() {
          GenerateTargetLabel();
          m2l_->CallRuntimeHelperRegImm(kQuickResolveString, r_method_, string_idx_, true);
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const RegStorage r_method_;
        const int32_t string_idx_;
      };

      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx));
    }

    GenBarrier();
    StoreValue(rl_dest, GetReturn(kRefReg));
  } else {
    // Try to see if we can embed a direct pointer.
    bool use_direct_ptr = false;
    size_t direct_ptr = 0;
    bool embed_string = false;
    // TODO: Implement for X86.
    if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
      embed_string = cu_->compiler_driver->CanEmbedStringInCode(*cu_->dex_file, string_idx,
                                                                &use_direct_ptr, &direct_ptr);
    }
    if (embed_string) {
      RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
      if (!use_direct_ptr) {
        LoadString(string_idx, rl_result.reg);
      } else {
        LoadConstant(rl_result.reg, static_cast<int32_t>(direct_ptr));
      }
      StoreValue(rl_dest, rl_result);
    } else {
      RegLocation rl_method = LoadCurrMethod();
      RegStorage res_reg = AllocTempRef();
      RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
                  kNotVolatile);
      LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
      StoreValue(rl_dest, rl_result);
    }
  }
}
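// Embedding a direct String pointer is only sound if the object's address is stable for the
// lifetime of the compiled code; in practice that means objects in a non-moving space such as
// the boot image. CanEmbedStringInCode encapsulates that decision; this note summarizes the
// intent, while the precise conditions live in the compiler driver.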
/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    bool is_finalizable;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
                                   &direct_type_ptr, &is_finalizable) &&
        !is_finalizable) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
        } else {
          CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true);
        } else {
          CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true);
    }
  } else {
    CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true);
  }
  StoreValue(rl_dest, GetReturn(kRefReg));
}
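// The entrypoints above form a ladder of decreasing runtime work: kQuickAllocObjectInitialized
// assumes the class is resolved and initialized, kQuickAllocObjectResolved still performs the
// initialization check, plain kQuickAllocObject resolves the type as well, and the
// WithAccessCheck variant additionally validates access. Finalizable classes are kept off the
// embedded-class fast path (note the !is_finalizable test) since their allocation must also
// register the object for finalization.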
void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kRefReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (IsSameReg(result_reg, object.reg)) {
    result_reg = AllocTypedTemp(false, kCoreReg);
    DCHECK(!IsSameReg(result_reg, object.reg));
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);

  RegStorage check_class = AllocTypedTemp(false, kRefReg);
  RegStorage object_class = AllocTypedTemp(false, kRefReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
                kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class, kNotVolatile);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
                kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
  }

  // FIXME: what should we be comparing here? compressed or decompressed references?
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);   // .eq case - load true
    OpEndIT(it);
  } else {
    GenSelectConst32(check_class, object_class, kCondEq, 1, 0, result_reg, kCoreReg);
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}
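// On Thumb2 the equality test is turned into a predicated move instead of a branch:
// OpIT(kCondEq, "") opens an IT (If-Then) block covering exactly one following instruction, so
// the LoadConstant of 1 executes only when the compare set the EQ flag. In rough assembly:
//
//   cmp   check_class, object_class
//   it    eq
//   moveq result, #1        // skipped when the classes differ; result keeps its 0
//
// Other targets reach the same effect through GenSelectConst32.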
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  RegStorage method_reg = TargetReg(kArg1, kRef);
  LoadCurrMethodDirect(method_reg);  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
  RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg0 will hold the ref.
  RegStorage ret_reg = GetReturn(kRefReg).reg;
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
    OpRegCopy(class_reg, ret_reg);  // Align usage with fast path
    LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
    LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    if (can_assume_type_is_in_dex_cache) {
      // Conditionally, as in the other case we will also load it.
      LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
    }

    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!can_assume_type_is_in_dex_cache) {
      LIR* slow_path_branch = OpCmpImmBranch(kCondEq, class_reg, 0, nullptr);
      LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);

      // Should load value here.
      LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref

      class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
       public:
        InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx,
                         RegLocation rl_src)
            : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx),
              rl_src_(rl_src) {
        }

        void Compile() OVERRIDE {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImm(kQuickInitializeType, type_idx_, true);
          m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kRef),
                          m2l_->TargetReg(kRet0, kRef));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        uint32_t type_idx_;
        RegLocation rl_src_;
      };

      AddSlowPath(new (arena_) InitTypeSlowPath(this, slow_path_branch, slow_path_target,
                                                type_idx, rl_src));
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(kCoreReg);
  if (!IsSameReg(rl_result.reg, ref_reg)) {
    // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);

  /* load object->klass_ */
  RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
              ref_class_reg, kNotVolatile);
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = nullptr;
  if (type_known_final) {
    // rl_result == ref == class.
    GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
                     kCoreReg);
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial);
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, ref_class_reg, class_reg);  // Same?
        it = OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(rl_result.reg, 1);  // .eq case - load true
      }
      OpRegCopy(ref_reg, class_reg);  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
      }

      OpRegCopy(TargetReg(kArg0, kRef), class_reg);  // .ne case - arg0 <= class
      CallRuntimeHelper(kQuickInstanceofNonTrivial, false);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != nullptr) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  RegStorage method_reg = TargetReg(kArg1, kRef);
  LoadCurrMethodDirect(method_reg);  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, nullptr);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_,
                                        m2l_->TargetReg(kArg1, kRef), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetReg(kArg1, kRef), kNotVolatile);
      }
      m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef),
                                    m2l_->TargetReg(kArg1, kRef), true);
      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    const bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
                TargetReg(kArg1, kRef), kNotVolatile);

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm.
     * Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr, kNotWide));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr, kNotWide));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr, kNotWide));
    UnmarkTemp(TargetReg(kLr, kNotWide));  // Remove lr from the temp pool
  }
}
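// The overlap test above guards a specific aliasing hazard: if the destination's low register is
// the same physical register as either source's high register, writing the low half first would
// destroy a high-half input before the second op reads it. For example, with result = {r2, r3}
// and src1 = {r0, r2}, computing the low half directly into r2 would clobber src1's high word;
// hence the low half goes into a temp and is copied into place after the high half is computed.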
void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  QuickEntrypointEnum target;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      target = kQuickShlLong;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      target = kQuickShrLong;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      target = kQuickUshrLong;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
      target = kQuickShlLong;
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(kCoreReg);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if ((shift_op) && (cu_->instruction_set != kArm64)) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;  // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code use the callout function.
    if (!done) {
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
      RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1, kNotWide));
      }
      // NOTE: callout here is not a safepoint.
/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch special-purpose codegen routines
 * or produce the corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;                 // Clear the lowest set bit.
  return (x & (x - 1)) == 0;  // True if at most one bit remains.
}
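
/*
 * Worked example: for x = 0b0110, x &= x - 1 clears the lowest set bit,
 * leaving 0b0100, and 0b0100 & 0b0011 == 0, so the test accepts two bits.
 * For x = 0b0111 the first clear leaves 0b0110, and 0b0110 & 0b0101 != 0,
 * so three or more set bits are correctly rejected.
 */
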
// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
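
/*
 * The power-of-two paths above implement Java's round-toward-zero division
 * by biasing negative dividends with (2^k - 1) before the arithmetic shift.
 * Worked example for src = -7, lit = 4 (k = 2):
 *
 *   t = asr(-7, 31)      = 0xFFFFFFFF   (all ones: sign mask)
 *   t = lsr(t, 32 - 2)   = 3            (bias 2^k - 1; 0 for positive src)
 *   t = t + (-7)         = -4
 *   result = asr(-4, 2)  = -1           (-7 / 4 truncated toward zero)
 *
 * The remainder path keeps the biased low bits instead: (-4 & 3) = 0, then
 * subtracts the bias, 0 - 3 = -3, matching -7 % 4 in Java.  For lit == 2
 * the sign mask and bias collapse into the single logical shift by 31.
 */
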
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
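
/*
 * Worked examples of the three strength reductions above:
 *   lit = 8  (power of two):       result = src << 3
 *   lit = 10 (two bits set):       result = (src << 1) + (src << 3)
 *   lit = 7  (power of two - 1):   result = (src << 3) - src
 * Any other multiplier falls back to a real multiply instruction.
 */
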
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);  /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use the Arm SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();  /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
        Clobber(TargetReg(kArg0, kNotWide));
        CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false);
        if (is_div) {
          rl_result = GetReturn(kCoreReg);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}
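
/*
 * The RSUB cases above compute (lit - src) rather than (src - lit).  Thumb2
 * has a native reverse-subtract (RSB), so a single instruction suffices;
 * other targets synthesize it as result = -src followed by result += lit,
 * which is equivalent since -(src) + lit == lit - src.  e.g.
 * rsub-int/lit8 v0, v1, #+00 is how dex expresses integer negation.
 */
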
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  int ret_reg = TargetReg(kRet0, kNotWide).GetReg();
  QuickEntrypointEnum target;

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      call_out = true;
      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
      target = kQuickLmul;
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
      target = kQuickLdiv;
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      target = kQuickLmod;
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() :
          TargetReg(kRet0, kNotWide).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();  /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = TargetReg(kArg0, kWide);
      RegStorage r_tmp2 = TargetReg(kArg2, kWide);
      LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = CallHelperSetup(target);
      GenDivZeroCheckWide(r_tmp2);
      LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, target, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == TargetReg(kRet0, kNotWide).GetReg()) {
      rl_result = GetReturnWide(kCoreReg);
    } else {
      rl_result = GetReturnWideAlt();
    }
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantNoClobber(rl_result.reg, value);
  StoreValue(rl_dest, rl_result);
  if (value == 0) {
    Workaround7250540(rl_dest, rl_result.reg);
  }
}

void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
                                RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */

  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(LocToRegClass(rl_dest));
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(LocToRegClass(rl_dest));
    StoreValue(rl_dest, rl_result);
  }
}

class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
      : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
  }

  void Compile() OVERRIDE {
    m2l_->ResetRegPool();
    m2l_->ResetDefTracking();
    GenerateTargetLabel(kPseudoSuspendTarget);
    m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
    if (cont_ != nullptr) {
      m2l_->OpUnconditionalBranch(cont_);
    }
  }
};

/* Check whether we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}
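
/*
 * Two suspend-check strategies appear above and below.  Explicit checks
 * (OpTestSuspend) test a flag in the Thread object and branch to a shared
 * slow path that calls kQuickTestSuspend.  Implicit checks
 * (CheckSuspendUsingLoad) instead emit a load that the runtime can cause to
 * fault when a suspension is requested, letting the fault handler perform
 * the suspend; that is why the load is recorded as a safepoint PC rather
 * than given a slow path.
 */
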
/* Check whether we need to check for a pending suspend request. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    FlushAllRegs();
    LIR* branch = OpUnconditionalBranch(nullptr);
    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to a helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}

/* Call out to a helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  const uint16_t entries = table[1];
  // Chained cmp-and-branch.
  const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
  int32_t current_key = as_int32[0];
  const int32_t* targets = &as_int32[1];
  rl_src = LoadValue(rl_src, kCoreReg);
  int i = 0;
  for (; i < entries; i++, current_key++) {
    if (!InexpensiveConstantInt(current_key, Instruction::Code::IF_EQ)) {
      // Switch to using a temp and add.
      break;
    }
    BasicBlock* case_block = mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
  }
  if (i < entries) {
    // The rest do not seem to be inexpensive.  Try to allocate a temp and use add.
    RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
    if (key_temp.Valid()) {
      LoadConstantNoClobber(key_temp, current_key);
      for (; i < entries - 1; i++, current_key++) {
        BasicBlock* case_block = mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
        OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
        OpRegImm(kOpAdd, key_temp, 1);  // Increment key.
      }
      // Last key: compare without the trailing increment.
      BasicBlock* case_block = mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
      OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
    } else {
      // No free temp, just finish the old loop.
      for (; i < entries; i++, current_key++) {
        BasicBlock* case_block = mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
        OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
      }
    }
  }
}
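
/*
 * A small packed switch has consecutive keys, so the lowering above is a
 * chain of compare-and-branch instructions.  e.g. for keys {10, 11, 12}:
 *
 *   cmp rSrc, #10 ; beq case0
 *   cmp rSrc, #11 ; beq case1
 *   cmp rSrc, #12 ; beq case2
 *
 * Once a key becomes expensive to materialize as an immediate, the running
 * key is kept in a temp register and bumped with an add between compares.
 */
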
void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }

  const uint16_t entries = table[1];
  if (entries <= kSmallSwitchThreshold) {
    GenSmallPackedSwitch(mir, table_offset, rl_src);
  } else {
    // Use the backend-specific implementation.
    GenLargePackedSwitch(mir, table_offset, rl_src);
  }
}

void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  const uint16_t entries = table[1];
  // Chained cmp-and-branch; a sparse table stores every key explicitly,
  // followed by the parallel array of branch targets.
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  rl_src = LoadValue(rl_src, kCoreReg);
  for (int i = 0; i < entries; i++) {
    int key = keys[i];
    BasicBlock* case_block = mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
  }
}

void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }

  const uint16_t entries = table[1];
  if (entries <= kSmallSwitchThreshold) {
    GenSmallSparseSwitch(mir, table_offset, rl_src);
  } else {
    // Use the backend-specific implementation.
    GenLargeSparseSwitch(mir, table_offset, rl_src);
  }
}

}  // namespace art