gen_common.cc revision 9c3b089519792245ab9f658865f44b8639b8d696
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;
typedef mirror::ObjectArray<mirror::Class> ClassArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
}

void Mir2Lir::GenDivZeroException() {
  LIR* branch = OpUnconditionalBranch(nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
  LIR* branch = OpCondBranch(c_code, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddDivZeroCheckSlowPath(branch);
}

void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
    }
  };

  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}

void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    index_, length_, true);
    }

   private:
    const RegStorage index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

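// Constant-index variant of the bounds check above.  kCondLs on (length, index)
// is the unsigned "length <= index" test, the mirror image of the kCondUge
// "index >= length" test used when the index is in a register.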
void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), length_(length) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();

      m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                    m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
    }

   private:
    const int32_t index_;
    const RegStorage length_;
  };

  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
}

LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel();
      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
    }
  };

  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
  return branch;
}

/* Perform null-check on a register.  */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
  if (Runtime::Current()->ExplicitNullChecks()) {
    return GenExplicitNullCheck(m_reg, opt_flags);
  }
  return nullptr;
}

/* Perform an explicit null-check on a register.  */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenNullCheck(m_reg);
}

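// With implicit null checks there is no compare-and-throw: the first load or
// store through a possibly null reference is itself the check.  Record a
// safepoint at that instruction so the signal handler can map the faulting PC
// back to a dex PC.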
void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::MarkPossibleStackOverflowException() {
  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
    MarkSafepointPC(last_lir_insn_);
  }
}

void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!Runtime::Current()->ExplicitNullChecks()) {
    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
      return;
    }
    // Force an implicit null check by performing a memory operation (load) from the given
    // register with offset 0.  This will cause a signal if the register contains 0 (null).
    RegStorage tmp = AllocTemp();
    // TODO: for Mips, would be best to use rZERO as the bogus register target.
    LIR* load = Load32Disp(reg, 0, tmp);
    FreeTemp(tmp);
    MarkSafepointPC(load);
  }
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
}

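// int-to-long: the high word of the result is the sign extension of the low
// word, produced by an arithmetic shift right of 31.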
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    LoadValueDirect(rl_src, rl_result.reg.GetLow());
  }
  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.reg, rl_src.reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
                                                       type_idx)) {
    bool is_type_initialized;  // Ignored as an array does not have an initializer.
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
      } else {
        // Use the direct pointer.
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset<4> func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    RegStorage r_src = AllocTemp();
    RegStorage r_dst = AllocTemp();
    RegStorage r_idx = AllocTemp();
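    // Choose a value register for the copy loop: Thumb2 presses lr into
    // service as an extra scratch register, while register-poor x86 frees
    // kRet0 for the duration of the loop and restores it afterwards.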
    RegStorage r_val;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
      case kX86_64:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      Store32Disp(TargetReg(kRet0),
                  mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.reg)) {
        FreeTemp(rl_arg.reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

//
// Slow path to ensure a class is initialized for sget/sput.
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
 public:
  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                      RegStorage r_base) :
      LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
      storage_index_(storage_index), r_base_(r_base) {
  }

  void Compile() {
    LIR* unresolved_target = GenerateTargetLabel();
    uninit_->target = unresolved_target;
    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
                               storage_index_, true);
    // Copy helper's result into r_base, a no-op on all but MIPS.
    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0));

    m2l_->OpUnconditionalBranch(cont_);
  }

 private:
  LIR* const uninit_;
  const int storage_index_;
  const RegStorage r_base_;
};

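// sput: the fast path stores through a compile-time-known field offset; the
// medium path must first load (and possibly initialize) the storage class via
// the dex cache; everything else defers to the pSet*Static entrypoints.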
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
      if (IsTemp(rl_method.reg)) {
        FreeTemp(rl_method.reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler
      // driver.
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
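        // Class::Status is an ordered enum, which is what makes the single
        // kCondLt compare against kStatusInitialized below sufficient: any
        // smaller status value means "not yet initialized".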
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    if (is_long_or_double) {
      RegisterClass register_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        register_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, register_kind);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (field_info.IsVolatile()) {
      // There might have been a store before this volatile one so insert StoreStore barrier.
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    } else if (rl_src.ref) {
      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    } else {
      Store32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
    }
    if (field_info.IsVolatile()) {
      // A load might follow the volatile store so insert a StoreLoad barrier.
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.reg, r_base);
    }
    FreeTemp(r_base);
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
  }
}

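// sget: resolves the storage base exactly as GenSput above; the difference is
// on the volatile side, where the load is followed by LoadLoad and LoadStore
// barriers instead of being bracketed by store barriers.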
void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    RegStorage r_base;
    if (field_info.IsReferrersClass()) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      r_base = AllocTemp();
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
    } else {
      // Medium path, static storage base in a different class which requires checks that the
      // other class is initialized
      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      RegStorage r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      r_base = TargetReg(kArg0);
      LockTemp(r_base);
      LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
      int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
      LoadRefDisp(r_base, offset_of_field, r_base);
      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
      if (!field_info.IsInitialized() &&
          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        // Check if r_base is NULL or a not yet initialized class.

        // The slow path is invoked if the r_base is NULL or the class pointed
        // to by it is not initialized.
        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
        RegStorage r_tmp = TargetReg(kArg2);
        LockTemp(r_tmp);
        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                               mirror::Class::StatusOffset().Int32Value(),
                                               mirror::Class::kStatusInitialized, NULL);
        LIR* cont = NewLIR0(kPseudoTargetLabel);

        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
                                                     field_info.StorageIndex(), r_base));

        FreeTemp(r_tmp);
      }
      FreeTemp(r_method);
    }
    // r_base now holds static storage base
    RegisterClass result_reg_kind = kAnyReg;
    if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
      // Force long/double volatile loads into SSE registers to avoid tearing.
      result_reg_kind = kFPReg;
    }
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);

    if (is_long_or_double) {
      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
    } else if (rl_result.ref) {
      LoadRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
    } else {
      Load32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
    }
    FreeTemp(r_base);

    if (field_info.IsVolatile()) {
      // Without context sensitive analysis, we must issue the most conservative barriers.
      // In this case, either a load or store may follow so we issue both barriers.
      GenMemBarrier(kLoadLoad);
      GenMemBarrier(kLoadStore);
    }

    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

// Generate code for all slow paths.
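// Slow paths queued by AddSlowPath are compiled here, after the main method
// body, so each out-of-line sequence costs the fast path nothing beyond its
// (usually untaken) entry branch.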
void Mir2Lir::HandleSlowPaths() {
  int n = slow_paths_.Size();
  for (int i = 0; i < n; ++i) {
    LIRSlowPath* slowpath = slow_paths_.Get(i);
    slowpath->Compile();
  }
  slow_paths_.Reset();
}

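// Suspend launch pads are likewise emitted out of line: each pad calls
// pTestSuspend (a safepoint) and then branches back to its resume label.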
void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    RegStorage r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        RegisterClass result_reg_kind = kAnyReg;
        if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
          // Force long/double volatile loads into SSE registers to avoid tearing.
          result_reg_kind = kFPReg;
        }
        rl_result = EvalLoc(rl_dest, result_reg_kind, true);
        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
                         rl_obj.s_reg_low);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
      } else {
        RegStorage reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
        MarkPossibleNullPointerException(opt_flags);
        if (field_info.IsVolatile()) {
          // Without context sensitive analysis, we must issue the most conservative barriers.
          // In this case, either a load or store may follow so we issue both barriers.
          GenMemBarrier(kLoadLoad);
          GenMemBarrier(kLoadStore);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.reg, opt_flags);
      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, k32,
                   rl_obj.s_reg_low);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // Without context sensitive analysis, we must issue the most conservative barriers.
        // In this case, either a load or store may follow so we issue both barriers.
        GenMemBarrier(kLoadLoad);
        GenMemBarrier(kLoadStore);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset<4> getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      RegisterClass src_reg_kind = kAnyReg;
      if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
        // Force long/double volatile stores into SSE registers to avoid tearing.
        src_reg_kind = kFPReg;
      }
      rl_src = LoadValueWide(rl_src, src_reg_kind);
      GenNullCheck(rl_obj.reg, opt_flags);
      RegStorage reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.reg, opt_flags);
      if (field_info.IsVolatile()) {
        // There might have been a store before this volatile one so insert StoreStore barrier.
        GenMemBarrier(kStoreStore);
      }
      Store32Disp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg);
      MarkPossibleNullPointerException(opt_flags);
      if (field_info.IsVolatile()) {
        // A load might follow the volatile store so insert a StoreLoad barrier.
        GenMemBarrier(kStoreLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.reg, rl_obj.reg);
      }
    }
  } else {
    ThreadOffset<4> setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
                                               rl_obj, rl_src, true);
  }
}

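// aput-object always goes through the runtime: the helper performs the
// component-type (array store) check and the GC card mark, plus whatever
// null/bounds checks the MIR flags say are still needed.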
void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                             RegLocation rl_src) {
  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
                            (opt_flags & MIR_IGNORE_NULL_CHECK));
  ThreadOffset<4> helper = needs_range_check
      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  RegStorage res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks, load type from dex cache
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    Load32Disp(rl_method.reg, dex_cache_offset, res_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    Load32Disp(res_reg, offset_of_type, rl_result.reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
                                                                type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Object to generate the slow path for class resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegLocation& rl_method, const RegLocation& rl_result) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            rl_method_(rl_method), rl_result_(rl_result) {
        }

        void Compile() {
          GenerateTargetLabel();

          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        rl_method_.reg, true);
          m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0));

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        const int type_idx_;
        const RegLocation rl_method_;
        const RegLocation rl_result_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));

      StoreValue(rl_dest, rl_result);
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

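// const-string: the fast path is just two loads (the method's dex cache string
// array, then the string's slot); a null slot means the string is unresolved
// and the slow path calls pResolveString.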
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
      Int32Value();
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers

    // If the Method* is already in a register, we can save a copy.
    RegLocation rl_method = mir_graph_->GetMethodLoc();
    RegStorage r_method;
    if (rl_method.location == kLocPhysReg) {
      // A temp would conflict with register use below.
      DCHECK(!IsTemp(rl_method.reg));
      r_method = rl_method.reg;
    } else {
      r_method = TargetReg(kArg2);
      LoadCurrMethodDirect(r_method);
    }
    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
                TargetReg(kArg0));

    // Might call out to helper, which will return resolved string in kRet0
    Load32Disp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    if (cu_->instruction_set == kThumb2 ||
        cu_->instruction_set == kMips) {
      // OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      LoadConstant(TargetReg(kArg1), string_idx);
      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);
      GenBarrier();

      // Object to generate the slow path for string resolution.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
        }

        void Compile() {
          GenerateTargetLabel();

          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));

          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);  // .eq
          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
          m2l_->MarkSafepointPC(call_inst);
          m2l_->FreeTemp(r_tgt);

          m2l_->OpUnconditionalBranch(cont_);
        }

       private:
        RegStorage r_method_;
      };

      // Add to list for future.
      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      LoadConstant(TargetReg(kArg1), string_idx);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method,
                              TargetReg(kArg1), true);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    RegStorage res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    Load32Disp(res_reg, offset_of_string, rl_result.reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset<4> func_offset(-1);
  const DexFile* dex_file = cu_->dex_file;
  CompilerDriver* driver = cu_->compiler_driver;
  if (driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *dex_file, type_idx)) {
    bool is_type_initialized;
    bool use_direct_type_ptr;
    uintptr_t direct_type_ptr;
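    // When the class can be embedded in the compiled code (kEmbedClassInCode),
    // the allocation helper is handed an already-resolved type, skipping the
    // dex-cache walk; the initialized/resolved split picks the cheaper
    // entrypoint when the class is known to be initialized.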
    if (kEmbedClassInCode &&
        driver->CanEmbedTypeInCode(*dex_file, type_idx,
                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
      // The fast path.
      if (!use_direct_type_ptr) {
        LoadClassType(type_idx, kArg0);
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
        }
      } else {
        // Use the direct pointer.
        if (!is_type_initialized) {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        } else {
          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
        }
      }
    } else {
      // The slow path.
      DCHECK_EQ(func_offset.Int32Value(), -1);
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    }
    DCHECK_NE(func_offset.Int32Value(), -1);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  }
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;
  if (result_reg == object.reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
  RegStorage object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                check_class);
    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  // FIXME: what should we be comparing here? compressed or decompressed references?
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
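    // OpIT opens a Thumb2 IT block: the guide string gives the condition sense
    // of each instruction after the first ("" = a single conditional
    // instruction, "E" = then/else pair, "EE" = one then and two elses).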
    LIR* it = OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);   // .eq case - load true
    OpEndIT(it);
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);   // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

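// General instance-of: once the trivial same-class comparison fails (or cannot
// be attempted), fall back to the pInstanceofNonTrivial runtime helper.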
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  // X86 has its own implementation.
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      LIR* it = OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.reg, 1);     // .eq case - load true
      LoadConstant(rl_result.reg, 0);     // .ne case - load false
      OpEndIT(it);
    } else {
      LoadConstant(rl_result.reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      LIR* it = nullptr;
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        it = OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      if (it != nullptr) {
        OpEndIT(it);
      }
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    }
  }
  // TODO: only clobber when type isn't final?
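  // The (possibly taken) helper call may have trashed caller-save registers,
  // so invalidate any values cached in them before rejoining the fast path.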
  ClobberCallerSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

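// check-cast produces no value: it either falls through or throws.  Casts the
// verifier has already proven safe are elided entirely.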
void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));   // kArg1 <= current Method*
  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg);
    int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
    LoadRefDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
      LIR* cont = NewLIR0(kPseudoTargetLabel);

      // Slow path to initialize the type.  Executed if the type is NULL.
      class SlowPath : public LIRSlowPath {
       public:
        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
                 const RegStorage class_reg) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
            class_reg_(class_reg) {
        }

        void Compile() {
          GenerateTargetLabel();

          // Call out to helper, which will return resolved type in kArg0
          // InitializeTypeFromCode(idx, method)
          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                        m2l_->TargetReg(kArg1), true);
          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
          m2l_->OpUnconditionalBranch(cont_);
        }

       public:
        const int type_idx_;
        const RegStorage class_reg_;
      };

      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref

  // Slow path for the case where the classes are not equal.  In this case we need
  // to call a helper function to do the check.
  class SlowPath : public LIRSlowPath {
   public:
    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
        LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
    }

    void Compile() {
      GenerateTargetLabel();

      if (load_) {
        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
                          m2l_->TargetReg(kArg1));
      }
      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
                                    m2l_->TargetReg(kArg1), true);

      m2l_->OpUnconditionalBranch(cont_);
    }

   private:
    bool load_;
  };

  if (type_known_abstract) {
    // Easier case, run slow path if target is non-null (slow path will load from target)
    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);
    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
  } else {
    // Harder, more common case.  We need to generate a forward branch over the load
    // if the target is null.  If it's non-null we perform the load and branch to the
    // slow path if the classes are not equal.

    /* Null is OK - continue */
    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
    /* load object->klass_ */
    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
    LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));

    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
    LIR* cont = NewLIR0(kPseudoTargetLabel);

    // Add the slow path that will not perform load since this is already done.
    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));

    // Set the null check to branch to the continuation.
    branch1->target = cont;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
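  // The low half is computed into a temp first: writing rl_result's low word
  // directly could clobber a source high word that the second (high) op still
  // needs to read.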
  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
    RegStorage t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
    OpRegCopy(rl_result.reg.GetLow(), t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}


void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset<4> func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code use the callout function.
    if (!done) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
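      // (Presumably safe: division by zero was checked above, and pIdivmod
      // itself neither throws nor suspends, so no safepoint PC is recorded.)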
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.reg, rl_src1.reg);
    } else {
      if (shift_op) {
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenDivZeroCheck(rl_src2.reg);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
      done = true;
    } else if (cu_->instruction_set == kThumb2) {
      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Use ARM SDIV instruction for division.  For remainder we also need to
        // calculate using a MUL and subtract.
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        if (check_zero) {
          GenDivZeroCheck(rl_src2.reg);
        }
        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
        done = true;
      }
    }

    // If we haven't already generated the code use the callout function.
    if (!done) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      RegStorage r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenDivZeroCheck(TargetReg(kArg1));
      }
      // NOTE: callout here is not a safepoint.
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv) {
        rl_result = GetReturn(false);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}
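/*
 * Note on the shift_op path above: the explicit "and" with 31 implements the
 * Dalvik/Java requirement that only the low five bits of the shift distance
 * are used for 32-bit shifts.  For example, "x << 33" is equivalent to
 * "x << 1", so masking the register-held distance preserves those semantics
 * on targets whose shifters behave differently for counts >= 32.
 */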
/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch special-purpose codegen routines
 * or produce the corresponding Thumb instructions directly.
 */

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases.
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg1 = AllocTemp();
    RegStorage t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
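/*
 * Worked example for the power-of-two division path above (is_div, lit == 4,
 * so k == 2): the sequence adds a bias of lit - 1 == 3 to negative inputs
 * before the arithmetic shift so the result rounds toward zero, as Java
 * division requires.  With src == -5:
 *   asr 31  -> 0xFFFFFFFF   (all ones, since src is negative)
 *   lsr 30  -> 3            (the lit - 1 bias)
 *   add     -> -2
 *   asr 2   -> -1 == -5 / 4
 * A non-negative src gets a zero bias and is simply shifted.  The remainder
 * path uses the same bias, masks with lit - 1, then subtracts the bias back.
 */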
// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 0) {
    return false;
  }
  if (lit == 0) {
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadConstant(rl_result.reg, 0);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  if (lit == 1) {
    rl_src = LoadValue(rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    StoreValue(rl_dest, rl_result);
    return true;
  }
  // There is RegRegRegShift on Arm, so check for more special cases.
  if (cu_->instruction_set == kThumb2) {
    return EasyMultiply(rl_src, rl_dest, lit);
  }
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << (shift + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    RegStorage t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
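/*
 * Examples of the strength reductions above (literal values chosen purely for
 * illustration):
 *   lit == 8   (power of two)          -> src << 3
 *   lit == 10  (pop count <= 2)        -> (src << 1) + (src << 3), via
 *                                         GenMultiplyByTwoBitMultiplier
 *   lit == 7   (power of two minus 1)  -> (src << 3) - src
 */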
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
        OpRegImm(kOpAdd, rl_result.reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenDivZeroException();
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
        return;
      }

      bool done = false;
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
        done = true;
      } else if (cu_->instruction_set == kThumb2) {
        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
          // Use ARM SDIV instruction for division.  For remainder we also need to
          // calculate using a MUL and subtract.
          rl_src = LoadValue(rl_src, kCoreReg);
          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
          done = true;
        }
      }

      if (!done) {
        FlushAllRegs();   /* Everything to home location. */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div) {
          rl_result = GetReturn(false);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.reg, rl_src.reg);
  } else {
    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}
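/*
 * Note on the RSUB_INT path above: "rsub" computes lit - src.  Thumb2 has a
 * reverse-subtract instruction, so a single OpRegRegImm suffices; other
 * targets synthesize it as (-src) + lit, which yields the same value for all
 * 32-bit inputs under two's-complement wraparound.
 */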
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset<4> func_offset(-1);
  int ret_reg = TargetReg(kRet0).GetReg();

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set != kMips) {
        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0).GetReg();
        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() :
                                                    TargetReg(kRet0).GetReg();
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src2, r_tmp2);
      RegStorage r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheckWide(r_tmp2);
      LoadValueDirectWideFixed(rl_src1, r_tmp1);
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0).GetReg()) {
      rl_result = GetReturnWide(false);
    } else {
      rl_result = GetReturnWideAlt();
    }
    StoreValueWide(rl_dest, rl_result);
  }
}
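/*
 * Sketch of the non-call path: GenLong3Addr pairs first_op/second_op so that
 * the carry or borrow from the low words propagates into the high words.  For
 * ADD_LONG on Thumb2 that is, roughly:
 *   adds  result_lo, src1_lo, src2_lo   ; kOpAdd, sets carry
 *   adc   result_hi, src1_hi, src2_hi   ; kOpAdc, consumes carry
 * e.g. 0x00000000FFFFFFFF + 1: the low add wraps to 0 with carry set, and the
 * adc makes the high word 1, giving 0x0000000100000000.
 */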
void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();
    LIR* branch = OpTestSuspend(nullptr);
    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
                         current_dalvik_offset_);
    branch->target = target;
    suspend_launchpads_.Insert(target);
  } else {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      return;
    }
    FlushAllRegs();  // TODO: needed?
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
  }
}

/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (Runtime::Current()->ExplicitSuspendChecks()) {
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    OpTestSuspend(target);
    LIR* launch_pad =
        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
               current_dalvik_offset_);
    FlushAllRegs();
    OpUnconditionalBranch(launch_pad);
    suspend_launchpads_.Insert(launch_pad);
  } else {
    // For the implicit suspend check, just perform the trigger
    // load and branch to the target.
    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
      OpUnconditionalBranch(target);
      return;
    }
    FlushAllRegs();
    LIR* inst = CheckSuspendUsingLoad();
    MarkSafepointPC(inst);
    OpUnconditionalBranch(target);
  }
}
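/*
 * The two strategies above trade code size for signal handling: the explicit
 * variant tests a thread flag and branches to a launch pad that performs the
 * suspend, while the implicit variant (CheckSuspendUsingLoad) issues a load
 * that the runtime can turn into a fault when a suspend is requested, so the
 * common case costs a single load and no branch.
 */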
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
}

/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
}

/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
  LoadConstantWide(rl_result.reg, value);
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art