gen_common.cc revision 4289456fa265b833434c2a8eee9e7a16da31c524
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
  LIR* tgt;
  LIR* branch;
  if (c_code == kCondAl) {
    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
                 imm_val);
    branch = OpUnconditionalBranch(tgt);
  } else {
    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::AddDivZeroSlowPath(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroSlowPath(ConditionCode c_code, RegStorage reg, int imm_val) {
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(nullptr);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, nullptr);
  }
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}

/*
 * Perform an explicit null-check on a register.
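 * Unlike the implicit (signal-based) variant, this emits an inline compare
 * against null and a branch to a throw launchpad; it returns nullptr when
 * null-check elimination has already proven the reference non-null.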
 */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}

void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0. This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = LoadWordDisp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
                    reg2.GetReg());
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
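  // (After the normalization above, only rl_src2 can be constant, so a single
  // check here covers both operand orders.)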
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let the helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage.
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function).
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal.
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
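     * The loop below copies one 32-bit word per iteration, counting r_idx
     * down from elems - 1, so a single decrement-and-branch both advances
     * and terminates the copy.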
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
                    rl_arg.reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                               storage_index_, true);
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path: the static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path: the static storage base is in a different class, which requires checks
      // that the other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    } else {
      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    }
    if (field_info.IsVolatile()) {
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
  }
}

void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path: the static storage base is this method's class.
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path: the static storage base is in a different class, which requires checks
      // that the other class is initialized.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds the static storage base.
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);

    if (is_long_or_double) {
      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
    } else {
      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
    }
    FreeTemp(r_base);

    if (field_info.IsVolatile()) {
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
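      // (kLoadLoad keeps this volatile load ordered before later loads and
      // kLoadStore keeps it ordered before later stores; together they
      // approximate acquire semantics on the weakly ordered targets.)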
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    }

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> getter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
    CallRuntimeHelperImm(getter_offset, field_info.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    RegStorage r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset<4> func_offset(-1);
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    const bool target_x86 = cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64;
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer);
        break;
      case kThrowConstantArrayBounds:
        // v1 holds the array length (Arm/Mips) or the array pointer (x86, which
        // reloads the length); v2 holds the constant index.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
                   mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0).GetReg()) {
          OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
                     mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
          }
        } else {
          if (v1 == TargetReg(kArg1).GetReg()) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
                       mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
                       mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
            }
            OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
          }
        }
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
        func_offset =
            QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod);
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCallerSave();
    RegStorage r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        // FIXME? duplicate null check?
        GenNullCheck(rl_obj.reg, opt_flags);
        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
                         rl_obj.s_reg_low);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
      } else {
        RegStorage reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.reg, opt_flags);
      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
                   rl_obj.s_reg_low);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // Without context sensitive analysis, we must issue the most conservative barriers.
        // In this case, either a load or store may follow so we issue both barriers.
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset<4> getter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
    CallRuntimeHelperImmRegLocation(getter_offset, field_info.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.reg, opt_flags);
      RegStorage reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.reg, rl_obj.reg);
      }
    }
  } else {
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset<4> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load the type from the dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path: at runtime, test if the type is null and if so initialize it.
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
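      // (LIRSlowPath objects are arena-allocated here and compiled later by
      // HandleSlowPaths(), so the out-of-line code never interrupts the
      // fast path's straight-line layout.)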
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        rl_method_.reg, true);
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result.
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
      (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // Slow path: resolve the string if it is not in the dex cache.
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0.
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      // OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);  // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        RegStorage r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let the helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
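// In effect this computes: result = (ref != null) && (ref->klass_ == klass),
// using one forward branch for the null case and (outside Thumb2, which uses
// an IT block instead) one for the class-compare case.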
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
  RegStorage object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    LIR* it = OpIT(kCondEq, "");  // if-convert the test
    LoadConstant(result_reg, 1);  // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0.
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
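      // (A Thumb2 IT block predicates the following instructions on the last
      // compare; the "E" mask flips the second one to the else sense, so the
      // two constant loads below cover the eq and ne cases without a branch.)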
      LIR* it = OpIT(kCondEq, "E");  // if-convert the test
      LoadConstant(rl_result.reg, 1);  // .eq case - load true
      LoadConstant(rl_result.reg, 0);  // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");  // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0.
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2).
    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type. Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0.
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has the class.
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal. In this case we need
  // to call a helper function to do the check.
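  // (The 'load' flag tells the slow path whether it still has to load
  // ref->klass_ into kArg1 before calling pCheckCast; the type-known-abstract
  // case skips that load on the main path.)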
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target).
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case. We need to generate a forward branch over the load
    // if the target is null. If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm. Until we have spill capabilities, temporarily add
     * lr to the temp set. It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
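     * (MarkTemp/FreeTemp below make lr allocatable for this sequence;
     * Clobber/UnmarkTemp at the end of the function restore its reserved
     * status.)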
     */
    MarkTemp(TargetReg(kLr));  // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));  // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset<4> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        AddDivZeroSlowPath(kCondEq, rl_src2.reg, 0);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          AddDivZeroSlowPath(kCondEq, rl_src2.reg, 0);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code, use the callout function.
    if (!done) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        AddDivZeroSlowPath(kCondEq, TargetReg(kArg1), 0);
      }
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch special-purpose codegen routines
 * or produce the corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
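// ('x &= x - 1' clears the lowest set bit, so after one clearance at most one
// bit may remain.  E.g. x = 0b0110 -> 0b0100, and 0b0100 & 0b0011 == 0; for
// x = 0b0111 the final test is 0b0110 & 0b0101 != 0.)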
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
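  // Signed division must truncate toward zero, while an arithmetic right
  // shift rounds toward negative infinity, so a negative dividend needs a
  // bias of (2^k - 1) added before the shift.  The asr #31 / lsr #(32-k)
  // sequence below materializes that bias (0 for non-negative src); for
  // lit == 2 a single lsr #31 of src yields it directly.  Worked example,
  // lit == 4 (k == 2), src == -7:  bias == 3, (-7 + 3) >> 2 == -1 ==
  // trunc(-7 / 4); remainder: ((-7 + 3) & 3) - 3 == -3 == -7 % 4.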
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
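  // Three cheap decompositions are recognized:
  //   lit == 2^n           e.g. lit == 8:  src << 3
  //   lit with <= 2 bits   e.g. lit == 10: (src << 3) + (src << 1)
  //   lit == 2^n - 1       e.g. lit == 7:  (src << 3) - src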
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;  // Dalvik shift semantics: only the low five bits count.
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        // Division by a literal zero always throws; branch unconditionally
        // to the slow path.
        AddDivZeroSlowPath(kCondAl, RegStorage::InvalidReg(), 0);
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}
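
/*
 * Dispatch a 64-bit integer arithmetic bytecode.  On Thumb2, add/sub are
 * emitted as a carry-propagating pair via GenLong3Addr (first_op on the low
 * words, second_op - adc/sbc - on the high words), and the logical ops pair
 * the same opcode for both halves.  Div/rem always go out of line (mul does
 * too on Mips); note that on Arm the pLmod helper returns its result in
 * kArg2/kArg3 rather than kRet0/kRet1.
 */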
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<4> func_offset(-1);
  int ret_reg = TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() :
                                                    TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
      LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0).GetReg())
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}
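
/*
 * Generic out-of-line path for primitive conversions that have no inline
 * codegen on the target; the shape of rl_dest (wide/fp) selects how the
 * helper's return value is retrieved.
 */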
void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(NULL);
    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                         current_dalvik_offset_);
    branch->target = target;
    suspend_launchpads_.Insert(target);
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}
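
/*
 * With explicit suspend checks the fast path tests a thread flag and, when it
 * is set, branches to a launchpad that invokes the suspend handler.  The
 * implicit variant instead relies on CheckSuspendUsingLoad(): it emits a load
 * from a per-thread trigger location, which the runtime is expected to arm so
 * that the load faults when a suspension is requested, leaving a single load
 * on the fast path.
 */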
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    LIR* launch_pad =
        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
               current_dalvik_offset_);
    FlushAllRegs();
    OpUnconditionalBranch(launch_pad);
    suspend_launchpads_.Insert(launch_pad);
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}

/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art